Index: solr/core/src/test/org/apache/solr/search/SpatialFilterTest.java
===================================================================
--- solr/core/src/test/org/apache/solr/search/SpatialFilterTest.java (revision 1381159)
+++ solr/core/src/test/org/apache/solr/search/SpatialFilterTest.java (working copy)
@@ -119,19 +119,19 @@
     checkHits(fieldName, false, "43.517030,-96.789603", 110, 1, 17);
-    // Tests SOLR-2829
-    String fieldNameHome = "home_ll";
-    String fieldNameWork = "work_ll";
+    // Tests SOLR-2829
+    String fieldNameHome = "home_ll";
+    String fieldNameWork = "work_ll";
-    clearIndex();
-    assertU(adoc("id", "1", fieldNameHome, "52.67,7.30", fieldNameWork,"48.60,11.61"));
-    assertU(commit());
+    clearIndex();
+    assertU(adoc("id", "1", fieldNameHome, "52.67,7.30", fieldNameWork,"48.60,11.61"));
+    assertU(commit());
-    checkHits(fieldNameHome, "52.67,7.30", 1, 1);
-    checkHits(fieldNameWork, "48.60,11.61", 1, 1);
-    checkHits(fieldNameWork, "52.67,7.30", 1, 0);
-    checkHits(fieldNameHome, "48.60,11.61", 1, 0);
-
+    checkHits(fieldNameHome, "52.67,7.30", 1, 1);
+    checkHits(fieldNameWork, "48.60,11.61", 1, 1);
+    checkHits(fieldNameWork, "52.67,7.30", 1, 0);
+    checkHits(fieldNameHome, "48.60,11.61", 1, 0);
+
   }

   private void checkHits(String fieldName, String pt, double distance, int count, int ... docIds) {
Index: solr/core/src/test/org/apache/solr/search/function/NvlValueSourceParser.java
===================================================================
--- solr/core/src/test/org/apache/solr/search/function/NvlValueSourceParser.java (revision 1381159)
+++ solr/core/src/test/org/apache/solr/search/function/NvlValueSourceParser.java (working copy)
@@ -47,33 +47,33 @@
   @Override
   public ValueSource parse(FunctionQParser fp) throws ParseException {
-    ValueSource source = fp.parseValueSource();
-    final float nvl = fp.parseFloat();
+    ValueSource source = fp.parseValueSource();
+    final float nvl = fp.parseFloat();
-    return new SimpleFloatFunction(source) {
-      @Override
+    return new SimpleFloatFunction(source) {
+      @Override
       protected String name() {
-        return "nvl";
-      }
+        return "nvl";
+      }
-      @Override
-      protected float func(int doc, FunctionValues vals) {
-        float v = vals.floatVal(doc);
-        if (v == nvlFloatValue) {
-          return nvl;
-        } else {
-          return v;
-        }
-      }
-    };
+      @Override
+      protected float func(int doc, FunctionValues vals) {
+        float v = vals.floatVal(doc);
+        if (v == nvlFloatValue) {
+          return nvl;
+        } else {
+          return v;
+        }
+      }
+    };
   }

-  @Override
-  public void init(NamedList args) {
-    /* initialize the value to consider as null */
-    Float nvlFloatValueArg = (Float) args.get("nvlFloatValue");
-    if (nvlFloatValueArg != null) {
-      this.nvlFloatValue = nvlFloatValueArg;
-    }
+  @Override
+  public void init(NamedList args) {
+    /* initialize the value to consider as null */
+    Float nvlFloatValueArg = (Float) args.get("nvlFloatValue");
+    if (nvlFloatValueArg != null) {
+      this.nvlFloatValue = nvlFloatValueArg;
     }
+  }
 }
\ No newline at end of file
Index: solr/core/src/test/org/apache/solr/spelling/DirectSolrSpellCheckerTest.java
===================================================================
--- solr/core/src/test/org/apache/solr/spelling/DirectSolrSpellCheckerTest.java (revision 1381159)
+++ solr/core/src/test/org/apache/solr/spelling/DirectSolrSpellCheckerTest.java (working copy)
@@ -86,12 +86,12 @@
   @Test
   public void testOnlyMorePopularWithExtendedResults() throws Exception {
-    assertQ(req("q", "teststop:fox", "qt", "spellCheckCompRH", SpellCheckComponent.COMPONENT_NAME, "true", SpellingParams.SPELLCHECK_DICT, "direct", SpellingParams.SPELLCHECK_EXTENDED_RESULTS, "true", SpellingParams.SPELLCHECK_ONLY_MORE_POPULAR, "true"),
+    assertQ(req("q", "teststop:fox", "qt", "spellCheckCompRH", SpellCheckComponent.COMPONENT_NAME, "true", SpellingParams.SPELLCHECK_DICT, "direct", SpellingParams.SPELLCHECK_EXTENDED_RESULTS, "true", SpellingParams.SPELLCHECK_ONLY_MORE_POPULAR, "true"),
       "//lst[@name='spellcheck']/lst[@name='suggestions']/lst[@name='fox']/int[@name='origFreq']=1",
       "//lst[@name='spellcheck']/lst[@name='suggestions']/lst[@name='fox']/arr[@name='suggestion']/lst/str[@name='word']='foo'",
       "//lst[@name='spellcheck']/lst[@name='suggestions']/lst[@name='fox']/arr[@name='suggestion']/lst/int[@name='freq']=2",
       "//lst[@name='spellcheck']/lst[@name='suggestions']/bool[@name='correctlySpelled']='true'"
-    );
+    );
  }
}
Index: solr/core/src/test/org/apache/solr/spelling/SpellPossibilityIteratorTest.java
===================================================================
--- solr/core/src/test/org/apache/solr/spelling/SpellPossibilityIteratorTest.java (revision 1381159)
+++ solr/core/src/test/org/apache/solr/spelling/SpellPossibilityIteratorTest.java (working copy)
@@ -28,71 +28,71 @@
 import org.junit.Test;

 public class SpellPossibilityIteratorTest extends SolrTestCaseJ4 {
-  private static final Token TOKEN_AYE = new Token("AYE", 0, 3);
-  private static final Token TOKEN_BEE = new Token("BEE", 4, 7);
-  private static final Token TOKEN_AYE_BEE = new Token("AYE BEE", 0, 7);
-  private static final Token TOKEN_CEE = new Token("CEE", 8, 11);
-
-  private LinkedHashMap AYE;
-  private LinkedHashMap BEE;
-  private LinkedHashMap AYE_BEE;
-  private LinkedHashMap CEE;
-
-  @Override
+  private static final Token TOKEN_AYE = new Token("AYE", 0, 3);
+  private static final Token TOKEN_BEE = new Token("BEE", 4, 7);
+  private static final Token TOKEN_AYE_BEE = new Token("AYE BEE", 0, 7);
+  private static final Token TOKEN_CEE = new Token("CEE", 8, 11);
+
+  private LinkedHashMap AYE;
+  private LinkedHashMap BEE;
+  private LinkedHashMap AYE_BEE;
+  private LinkedHashMap CEE;
+
+  @Override
   @Before
-  public void setUp() throws Exception {
-    super.setUp();
+  public void setUp() throws Exception {
+    super.setUp();

-    AYE = new LinkedHashMap();
-    AYE.put("I", 0);
-    AYE.put("II", 0);
-    AYE.put("III", 0);
-    AYE.put("IV", 0);
-    AYE.put("V", 0);
-    AYE.put("VI", 0);
-    AYE.put("VII", 0);
-    AYE.put("VIII", 0);
-
-    BEE = new LinkedHashMap();
-    BEE.put("alpha", 0);
-    BEE.put("beta", 0);
-    BEE.put("gamma", 0);
-    BEE.put("delta", 0);
-    BEE.put("epsilon", 0);
-    BEE.put("zeta", 0);
-    BEE.put("eta", 0);
-    BEE.put("theta", 0);
-    BEE.put("iota", 0);
-
-    AYE_BEE = new LinkedHashMap();
-    AYE_BEE.put("one-alpha", 0);
-    AYE_BEE.put("two-beta", 0);
-    AYE_BEE.put("three-gamma", 0);
-    AYE_BEE.put("four-delta", 0);
-    AYE_BEE.put("five-epsilon", 0);
-    AYE_BEE.put("six-zeta", 0);
-    AYE_BEE.put("seven-eta", 0);
-    AYE_BEE.put("eight-theta", 0);
-    AYE_BEE.put("nine-iota", 0);
-
+    AYE = new LinkedHashMap();
+    AYE.put("I", 0);
+    AYE.put("II", 0);
+    AYE.put("III", 0);
+    AYE.put("IV", 0);
+    AYE.put("V", 0);
+    AYE.put("VI", 0);
+    AYE.put("VII", 0);
+    AYE.put("VIII", 0);
-    CEE = new LinkedHashMap();
-    CEE.put("one", 0);
-    CEE.put("two", 0);
-    CEE.put("three", 0);
-    CEE.put("four", 0);
-    CEE.put("five", 0);
-    CEE.put("six", 0);
-    CEE.put("seven", 0);
-    CEE.put("eight", 0);
-    CEE.put("nine", 0);
-    CEE.put("ten", 0);
-  }
-
-  @Test
-  public void testScalability() throws Exception {
-    Map<Token, LinkedHashMap<String, Integer>> lotsaSuggestions = new LinkedHashMap<Token, LinkedHashMap<String, Integer>>();
-    lotsaSuggestions.put(TOKEN_AYE , AYE);
+    BEE = new LinkedHashMap();
+    BEE.put("alpha", 0);
+    BEE.put("beta", 0);
+    BEE.put("gamma", 0);
+    BEE.put("delta", 0);
+    BEE.put("epsilon", 0);
+    BEE.put("zeta", 0);
+    BEE.put("eta", 0);
+    BEE.put("theta", 0);
+    BEE.put("iota", 0);
+
+    AYE_BEE = new LinkedHashMap();
+    AYE_BEE.put("one-alpha", 0);
+    AYE_BEE.put("two-beta", 0);
+    AYE_BEE.put("three-gamma", 0);
+    AYE_BEE.put("four-delta", 0);
+    AYE_BEE.put("five-epsilon", 0);
+    AYE_BEE.put("six-zeta", 0);
+    AYE_BEE.put("seven-eta", 0);
+    AYE_BEE.put("eight-theta", 0);
+    AYE_BEE.put("nine-iota", 0);
+
+
+    CEE = new LinkedHashMap();
+    CEE.put("one", 0);
+    CEE.put("two", 0);
+    CEE.put("three", 0);
+    CEE.put("four", 0);
+    CEE.put("five", 0);
+    CEE.put("six", 0);
+    CEE.put("seven", 0);
+    CEE.put("eight", 0);
+    CEE.put("nine", 0);
+    CEE.put("ten", 0);
+  }
+
+  @Test
+  public void testScalability() throws Exception {
+    Map<Token, LinkedHashMap<String, Integer>> lotsaSuggestions = new LinkedHashMap<Token, LinkedHashMap<String, Integer>>();
+    lotsaSuggestions.put(TOKEN_AYE , AYE);
     lotsaSuggestions.put(TOKEN_BEE , BEE);
     lotsaSuggestions.put(TOKEN_CEE , CEE);
@@ -112,15 +112,15 @@
     lotsaSuggestions.put(new Token("BEE4", 4, 7), BEE);
     lotsaSuggestions.put(new Token("CEE4", 8, 11), CEE);

-    PossibilityIterator iter = new PossibilityIterator(lotsaSuggestions, 1000, 10000, false);
-    int count = 0;
-    while (iter.hasNext()) {
-      PossibilityIterator.RankedSpellPossibility rsp = iter.next();
-      count++;
-    }
-    assertTrue(count==1000);
-
-    lotsaSuggestions.put(new Token("AYE_BEE1", 0, 7), AYE_BEE);
+    PossibilityIterator iter = new PossibilityIterator(lotsaSuggestions, 1000, 10000, false);
+    int count = 0;
+    while (iter.hasNext()) {
+      PossibilityIterator.RankedSpellPossibility rsp = iter.next();
+      count++;
+    }
+    assertTrue(count==1000);
+
+    lotsaSuggestions.put(new Token("AYE_BEE1", 0, 7), AYE_BEE);
     lotsaSuggestions.put(new Token("AYE_BEE2", 0, 7), AYE_BEE);
     lotsaSuggestions.put(new Token("AYE_BEE3", 0, 7), AYE_BEE);
     lotsaSuggestions.put(new Token("AYE_BEE4", 0, 7), AYE_BEE);
@@ -131,62 +131,62 @@
       count++;
     }
     assertTrue(count<100);
-  }
-
-  @Test
-  public void testSpellPossibilityIterator() throws Exception {
-    Map<Token, LinkedHashMap<String, Integer>> suggestions = new LinkedHashMap<Token, LinkedHashMap<String, Integer>>();
-    suggestions.put(TOKEN_AYE , AYE);
+  }
+
+  @Test
+  public void testSpellPossibilityIterator() throws Exception {
+    Map<Token, LinkedHashMap<String, Integer>> suggestions = new LinkedHashMap<Token, LinkedHashMap<String, Integer>>();
+    suggestions.put(TOKEN_AYE , AYE);
     suggestions.put(TOKEN_BEE , BEE);
     suggestions.put(TOKEN_CEE , CEE);

-    PossibilityIterator iter = new PossibilityIterator(suggestions, 1000, 10000, false);
-    int count = 0;
-    while (iter.hasNext()) {
-
-      PossibilityIterator.RankedSpellPossibility rsp = iter.next();
-      if(count==0) {
-        assertTrue("I".equals(rsp.corrections.get(0).getCorrection()));
-        assertTrue("alpha".equals(rsp.corrections.get(1).getCorrection()));
-        assertTrue("one".equals(rsp.corrections.get(2).getCorrection()));
-      }
-      count++;
-    }
-    assertTrue(("Three maps (8*9*10) should return 720 iterations but instead returned " + count), count == 720);
+    PossibilityIterator iter = new PossibilityIterator(suggestions, 1000, 10000, false);
+    int count = 0;
+    while (iter.hasNext()) {

-    suggestions.remove(TOKEN_CEE);
-    iter = new PossibilityIterator(suggestions, 100, 10000, false);
-    count = 0;
-    while (iter.hasNext()) {
-      iter.next();
-      count++;
-    }
-    assertTrue(("Two maps (8*9) should return 72 iterations but instead returned " + count), count == 72);
+      PossibilityIterator.RankedSpellPossibility rsp = iter.next();
+      if(count==0) {
+        assertTrue("I".equals(rsp.corrections.get(0).getCorrection()));
+        assertTrue("alpha".equals(rsp.corrections.get(1).getCorrection()));
+        assertTrue("one".equals(rsp.corrections.get(2).getCorrection()));
+      }
+      count++;
+    }
+    assertTrue(("Three maps (8*9*10) should return 720 iterations but instead returned " + count), count == 720);

-    suggestions.remove(TOKEN_BEE);
-    iter = new PossibilityIterator(suggestions, 5, 10000, false);
-    count = 0;
-    while (iter.hasNext()) {
-      iter.next();
-      count++;
-    }
-    assertTrue(("We requested 5 suggestions but got " + count), count == 5);
+    suggestions.remove(TOKEN_CEE);
+    iter = new PossibilityIterator(suggestions, 100, 10000, false);
+    count = 0;
+    while (iter.hasNext()) {
+      iter.next();
+      count++;
+    }
+    assertTrue(("Two maps (8*9) should return 72 iterations but instead returned " + count), count == 72);

-    suggestions.remove(TOKEN_AYE);
-    iter = new PossibilityIterator(suggestions, Integer.MAX_VALUE, 10000, false);
-    count = 0;
-    while (iter.hasNext()) {
-      iter.next();
-      count++;
-    }
-    assertTrue(("No maps should return 0 iterations but instead returned " + count), count == 0);
+    suggestions.remove(TOKEN_BEE);
+    iter = new PossibilityIterator(suggestions, 5, 10000, false);
+    count = 0;
+    while (iter.hasNext()) {
+      iter.next();
+      count++;
+    }
+    assertTrue(("We requested 5 suggestions but got " + count), count == 5);

-  }
-
-  @Test
+    suggestions.remove(TOKEN_AYE);
+    iter = new PossibilityIterator(suggestions, Integer.MAX_VALUE, 10000, false);
+    count = 0;
+    while (iter.hasNext()) {
+      iter.next();
+      count++;
+    }
+    assertTrue(("No maps should return 0 iterations but instead returned " + count), count == 0);
+
+  }
+
+  @Test
   public void testOverlappingTokens() throws Exception {
-    Map<Token, LinkedHashMap<String, Integer>> overlappingSuggestions = new LinkedHashMap<Token, LinkedHashMap<String, Integer>>();
-    overlappingSuggestions.put(TOKEN_AYE, AYE);
+    Map<Token, LinkedHashMap<String, Integer>> overlappingSuggestions = new LinkedHashMap<Token, LinkedHashMap<String, Integer>>();
+    overlappingSuggestions.put(TOKEN_AYE, AYE);
     overlappingSuggestions.put(TOKEN_BEE, BEE);
     overlappingSuggestions.put(TOKEN_AYE_BEE, AYE_BEE);
     overlappingSuggestions.put(TOKEN_CEE, CEE);
Index: solr/core/src/test/org/apache/solr/spelling/SpellCheckCollatorTest.java
===================================================================
--- solr/core/src/test/org/apache/solr/spelling/SpellCheckCollatorTest.java (revision 1381159)
+++ solr/core/src/test/org/apache/solr/spelling/SpellCheckCollatorTest.java (working copy)
@@ -40,30 +40,30 @@
 @Slow
 public class SpellCheckCollatorTest extends SolrTestCaseJ4 {
-  @BeforeClass
-  public static void beforeClass() throws Exception {
-    initCore("solrconfig-spellcheckcomponent.xml", "schema.xml");
-    assertNull(h.validateUpdate(adoc("id", "0", "lowerfilt", "faith hope and love")));
-    assertNull(h.validateUpdate(adoc("id", "1", "lowerfilt", "faith hope and loaves")));
-    assertNull(h.validateUpdate(adoc("id", "2", "lowerfilt", "fat hops and loaves")));
-    assertNull(h.validateUpdate(adoc("id", "3", "lowerfilt", "faith of homer")));
-    assertNull(h.validateUpdate(adoc("id", "4", "lowerfilt", "fat of homer")));
-    assertNull(h.validateUpdate(adoc("id", "5", "lowerfilt1", "peace")));
-    assertNull(h.validateUpdate(adoc("id", "6", "lowerfilt", "hyphenated word")));
-    assertNull(h.validateUpdate(adoc("id", "7", "teststop", "Jane filled out a form at Charles De Gaulle")));
-    assertNull(h.validateUpdate(adoc("id", "8", "teststop", "Dick flew from Heathrow")));
-    assertNull(h.validateUpdate(adoc("id", "9", "teststop", "Jane is stuck in customs because Spot chewed up the form")));
-    assertNull(h.validateUpdate(adoc("id", "10", "teststop", "Once in Paris Dick built a fire on the hearth")));
hearth"))); - assertNull(h.validateUpdate(adoc("id", "11", "teststop", "Dick waited for Jane as he watched the sparks flow upward"))); - assertNull(h.validateUpdate(adoc("id", "12", "teststop", "This June parisian rendez-vous is ruined because of a customs snafu"))); - assertNull(h.validateUpdate(adoc("id", "13", "teststop", "partisan political machine"))); - assertNull(h.validateUpdate(commit())); - } + @BeforeClass + public static void beforeClass() throws Exception { + initCore("solrconfig-spellcheckcomponent.xml", "schema.xml"); + assertNull(h.validateUpdate(adoc("id", "0", "lowerfilt", "faith hope and love"))); + assertNull(h.validateUpdate(adoc("id", "1", "lowerfilt", "faith hope and loaves"))); + assertNull(h.validateUpdate(adoc("id", "2", "lowerfilt", "fat hops and loaves"))); + assertNull(h.validateUpdate(adoc("id", "3", "lowerfilt", "faith of homer"))); + assertNull(h.validateUpdate(adoc("id", "4", "lowerfilt", "fat of homer"))); + assertNull(h.validateUpdate(adoc("id", "5", "lowerfilt1", "peace"))); + assertNull(h.validateUpdate(adoc("id", "6", "lowerfilt", "hyphenated word"))); + assertNull(h.validateUpdate(adoc("id", "7", "teststop", "Jane filled out a form at Charles De Gaulle"))); + assertNull(h.validateUpdate(adoc("id", "8", "teststop", "Dick flew from Heathrow"))); + assertNull(h.validateUpdate(adoc("id", "9", "teststop", "Jane is stuck in customs because Spot chewed up the form"))); + assertNull(h.validateUpdate(adoc("id", "10", "teststop", "Once in Paris Dick built a fire on the hearth"))); + assertNull(h.validateUpdate(adoc("id", "11", "teststop", "Dick waited for Jane as he watched the sparks flow upward"))); + assertNull(h.validateUpdate(adoc("id", "12", "teststop", "This June parisian rendez-vous is ruined because of a customs snafu"))); + assertNull(h.validateUpdate(adoc("id", "13", "teststop", "partisan political machine"))); + assertNull(h.validateUpdate(commit())); + } - @Test - public void testCollationWithHypens() throws Exception - { - SolrCore core = h.getCore(); + @Test + public void testCollationWithHypens() throws Exception + { + SolrCore core = h.getCore(); SearchComponent speller = core.getSearchComponent("spellcheck"); assertTrue("speller is null and it shouldn't be", speller != null); @@ -111,12 +111,12 @@ } } - - public void testCollateWithOverride() throws Exception - { - assertQ( + + public void testCollateWithOverride() throws Exception + { + assertQ( req( - SpellCheckComponent.COMPONENT_NAME, "true", + SpellCheckComponent.COMPONENT_NAME, "true", SpellCheckComponent.SPELLCHECK_DICT, "direct", SpellingParams.SPELLCHECK_COUNT, "10", SpellingParams.SPELLCHECK_COLLATE, "true", @@ -130,303 +130,303 @@ ), "//lst[@name='spellcheck']/lst[@name='suggestions']/str[@name='collation']='parisian political machine'" ); - assertQ( - req( - SpellCheckComponent.COMPONENT_NAME, "true", - SpellCheckComponent.SPELLCHECK_DICT, "direct", - SpellingParams.SPELLCHECK_COUNT, "10", - SpellingParams.SPELLCHECK_COLLATE, "true", - SpellingParams.SPELLCHECK_MAX_COLLATION_TRIES, "10", - SpellingParams.SPELLCHECK_MAX_COLLATIONS, "10", - "qt", "spellCheckCompRH", - "defType", "edismax", - "qf", "teststop", - "mm", "1", - SpellingParams.SPELLCHECK_COLLATE_PARAM_OVERRIDE + "mm", "100%", - CommonParams.Q, "partisian politcal mashine" - ), - "//lst[@name='spellcheck']/lst[@name='suggestions']/str[@name='collation']='partisan political machine'" - ); + assertQ( + req( + SpellCheckComponent.COMPONENT_NAME, "true", + SpellCheckComponent.SPELLCHECK_DICT, "direct", + 
SpellingParams.SPELLCHECK_COUNT, "10", + SpellingParams.SPELLCHECK_COLLATE, "true", + SpellingParams.SPELLCHECK_MAX_COLLATION_TRIES, "10", + SpellingParams.SPELLCHECK_MAX_COLLATIONS, "10", + "qt", "spellCheckCompRH", + "defType", "edismax", + "qf", "teststop", + "mm", "1", + SpellingParams.SPELLCHECK_COLLATE_PARAM_OVERRIDE + "mm", "100%", + CommonParams.Q, "partisian politcal mashine" + ), + "//lst[@name='spellcheck']/lst[@name='suggestions']/str[@name='collation']='partisan political machine'" + ); - } + } - @Test - public void testCollateWithFilter() throws Exception - { - SolrCore core = h.getCore(); - SearchComponent speller = core.getSearchComponent("spellcheck"); - assertTrue("speller is null and it shouldn't be", speller != null); - - ModifiableSolrParams params = new ModifiableSolrParams(); - params.add(SpellCheckComponent.COMPONENT_NAME, "true"); - params.add(SpellingParams.SPELLCHECK_BUILD, "true"); - params.add(SpellingParams.SPELLCHECK_COUNT, "10"); - params.add(SpellingParams.SPELLCHECK_COLLATE, "true"); - params.add(SpellingParams.SPELLCHECK_MAX_COLLATION_TRIES, "10"); - params.add(SpellingParams.SPELLCHECK_MAX_COLLATIONS, "10"); - params.add(CommonParams.Q, "lowerfilt:(+fauth +home +loane)"); - params.add(CommonParams.FQ, "NOT(id:1)"); - - //Because a FilterQuery is applied which removes doc id#1 from possible hits, we would - //not want the collations to return us "lowerfilt:(+faith +hope +loaves)" as this only matches doc id#1. - SolrRequestHandler handler = core.getRequestHandler("spellCheckCompRH"); - SolrQueryResponse rsp = new SolrQueryResponse(); - rsp.add("responseHeader", new SimpleOrderedMap()); - SolrQueryRequest req = new LocalSolrQueryRequest(core, params); - handler.handleRequest(req, rsp); - req.close(); - NamedList values = rsp.getValues(); - NamedList spellCheck = (NamedList) values.get("spellcheck"); - NamedList suggestions = (NamedList) spellCheck.get("suggestions"); - List collations = suggestions.getAll("collation"); - assertTrue(collations.size() > 0); - for(String collation : collations) { - assertTrue(!collation.equals("lowerfilt:(+faith +hope +loaves)")); - } - } - - @Test - public void testCollateWithMultipleRequestHandlers() throws Exception - { - SolrCore core = h.getCore(); - SearchComponent speller = core.getSearchComponent("spellcheck"); - assertTrue("speller is null and it shouldn't be", speller != null); - - ModifiableSolrParams params = new ModifiableSolrParams(); - params.add(SpellCheckComponent.COMPONENT_NAME, "true"); - params.add(SpellingParams.SPELLCHECK_DICT, "multipleFields"); - params.add(SpellingParams.SPELLCHECK_BUILD, "true"); - params.add(SpellingParams.SPELLCHECK_COUNT, "10"); - params.add(SpellingParams.SPELLCHECK_COLLATE, "true"); - params.add(SpellingParams.SPELLCHECK_MAX_COLLATION_TRIES, "1"); - params.add(SpellingParams.SPELLCHECK_MAX_COLLATIONS, "1"); - params.add(CommonParams.Q, "peac"); - - //SpellCheckCompRH has no "qf" defined. It will not find "peace" from "peac" despite it being in the dictionary - //because requrying against this Request Handler results in 0 hits. 
- SolrRequestHandler handler = core.getRequestHandler("spellCheckCompRH"); - SolrQueryResponse rsp = new SolrQueryResponse(); - rsp.add("responseHeader", new SimpleOrderedMap()); - SolrQueryRequest req = new LocalSolrQueryRequest(core, params); - handler.handleRequest(req, rsp); - req.close(); - NamedList values = rsp.getValues(); - NamedList spellCheck = (NamedList) values.get("spellcheck"); - NamedList suggestions = (NamedList) spellCheck.get("suggestions"); - String singleCollation = (String) suggestions.get("collation"); - assertNull(singleCollation); - - //SpellCheckCompRH1 has "lowerfilt1" defined in the "qf" param. It will find "peace" from "peac" because - //requrying field "lowerfilt1" returns the hit. - params.remove(SpellingParams.SPELLCHECK_BUILD); - handler = core.getRequestHandler("spellCheckCompRH1"); - rsp = new SolrQueryResponse(); - rsp.add("responseHeader", new SimpleOrderedMap()); - req = new LocalSolrQueryRequest(core, params); - handler.handleRequest(req, rsp); - req.close(); - values = rsp.getValues(); - spellCheck = (NamedList) values.get("spellcheck"); - suggestions = (NamedList) spellCheck.get("suggestions"); - singleCollation = (String) suggestions.get("collation"); - assertEquals(singleCollation, "peace"); - } + @Test + public void testCollateWithFilter() throws Exception + { + SolrCore core = h.getCore(); + SearchComponent speller = core.getSearchComponent("spellcheck"); + assertTrue("speller is null and it shouldn't be", speller != null); - @Test - public void testExtendedCollate() throws Exception { - SolrCore core = h.getCore(); - SearchComponent speller = core.getSearchComponent("spellcheck"); - assertTrue("speller is null and it shouldn't be", speller != null); + ModifiableSolrParams params = new ModifiableSolrParams(); + params.add(SpellCheckComponent.COMPONENT_NAME, "true"); + params.add(SpellingParams.SPELLCHECK_BUILD, "true"); + params.add(SpellingParams.SPELLCHECK_COUNT, "10"); + params.add(SpellingParams.SPELLCHECK_COLLATE, "true"); + params.add(SpellingParams.SPELLCHECK_MAX_COLLATION_TRIES, "10"); + params.add(SpellingParams.SPELLCHECK_MAX_COLLATIONS, "10"); + params.add(CommonParams.Q, "lowerfilt:(+fauth +home +loane)"); + params.add(CommonParams.FQ, "NOT(id:1)"); - ModifiableSolrParams params = new ModifiableSolrParams(); - params.add(CommonParams.QT, "spellCheckCompRH"); - params.add(CommonParams.Q, "lowerfilt:(+fauth +home +loane)"); - params.add(SpellingParams.SPELLCHECK_EXTENDED_RESULTS, "true"); - params.add(SpellCheckComponent.COMPONENT_NAME, "true"); - params.add(SpellingParams.SPELLCHECK_BUILD, "true"); - params.add(SpellingParams.SPELLCHECK_COUNT, "10"); - params.add(SpellingParams.SPELLCHECK_COLLATE, "true"); + //Because a FilterQuery is applied which removes doc id#1 from possible hits, we would + //not want the collations to return us "lowerfilt:(+faith +hope +loaves)" as this only matches doc id#1. 
+ SolrRequestHandler handler = core.getRequestHandler("spellCheckCompRH"); + SolrQueryResponse rsp = new SolrQueryResponse(); + rsp.add("responseHeader", new SimpleOrderedMap()); + SolrQueryRequest req = new LocalSolrQueryRequest(core, params); + handler.handleRequest(req, rsp); + req.close(); + NamedList values = rsp.getValues(); + NamedList spellCheck = (NamedList) values.get("spellcheck"); + NamedList suggestions = (NamedList) spellCheck.get("suggestions"); + List collations = suggestions.getAll("collation"); + assertTrue(collations.size() > 0); + for(String collation : collations) { + assertTrue(!collation.equals("lowerfilt:(+faith +hope +loaves)")); + } + } - // Testing backwards-compatible behavior. - // Returns 1 collation as a single string. - // All words are "correct" per the dictionary, but this collation would - // return no results if tried. - SolrRequestHandler handler = core.getRequestHandler("spellCheckCompRH"); - SolrQueryResponse rsp = new SolrQueryResponse(); - rsp.add("responseHeader", new SimpleOrderedMap()); - SolrQueryRequest req = new LocalSolrQueryRequest(core, params); - handler.handleRequest(req, rsp); - req.close(); - NamedList values = rsp.getValues(); - NamedList spellCheck = (NamedList) values.get("spellcheck"); - NamedList suggestions = (NamedList) spellCheck.get("suggestions"); - String singleCollation = (String) suggestions.get("collation"); - assertEquals("lowerfilt:(+faith +homer +loaves)", singleCollation); + @Test + public void testCollateWithMultipleRequestHandlers() throws Exception + { + SolrCore core = h.getCore(); + SearchComponent speller = core.getSearchComponent("spellcheck"); + assertTrue("speller is null and it shouldn't be", speller != null); - // Testing backwards-compatible response format but will only return a - // collation that would return results. - params.remove(SpellingParams.SPELLCHECK_BUILD); - params.add(SpellingParams.SPELLCHECK_MAX_COLLATION_TRIES, "5"); - params.add(SpellingParams.SPELLCHECK_MAX_COLLATIONS, "1"); - handler = core.getRequestHandler("spellCheckCompRH"); - rsp = new SolrQueryResponse(); - rsp.add("responseHeader", new SimpleOrderedMap()); - req = new LocalSolrQueryRequest(core, params); - handler.handleRequest(req, rsp); + ModifiableSolrParams params = new ModifiableSolrParams(); + params.add(SpellCheckComponent.COMPONENT_NAME, "true"); + params.add(SpellingParams.SPELLCHECK_DICT, "multipleFields"); + params.add(SpellingParams.SPELLCHECK_BUILD, "true"); + params.add(SpellingParams.SPELLCHECK_COUNT, "10"); + params.add(SpellingParams.SPELLCHECK_COLLATE, "true"); + params.add(SpellingParams.SPELLCHECK_MAX_COLLATION_TRIES, "1"); + params.add(SpellingParams.SPELLCHECK_MAX_COLLATIONS, "1"); + params.add(CommonParams.Q, "peac"); + + //SpellCheckCompRH has no "qf" defined. It will not find "peace" from "peac" despite it being in the dictionary + //because requrying against this Request Handler results in 0 hits. 
+ SolrRequestHandler handler = core.getRequestHandler("spellCheckCompRH"); + SolrQueryResponse rsp = new SolrQueryResponse(); + rsp.add("responseHeader", new SimpleOrderedMap()); + SolrQueryRequest req = new LocalSolrQueryRequest(core, params); + handler.handleRequest(req, rsp); req.close(); - values = rsp.getValues(); - spellCheck = (NamedList) values.get("spellcheck"); - suggestions = (NamedList) spellCheck.get("suggestions"); - singleCollation = (String) suggestions.get("collation"); - assertEquals("lowerfilt:(+faith +hope +loaves)", singleCollation); + NamedList values = rsp.getValues(); + NamedList spellCheck = (NamedList) values.get("spellcheck"); + NamedList suggestions = (NamedList) spellCheck.get("suggestions"); + String singleCollation = (String) suggestions.get("collation"); + assertNull(singleCollation); - // Testing returning multiple collations if more than one valid - // combination exists. - params.remove(SpellingParams.SPELLCHECK_MAX_COLLATION_TRIES); - params.remove(SpellingParams.SPELLCHECK_MAX_COLLATIONS); - params.add(SpellingParams.SPELLCHECK_MAX_COLLATION_TRIES, "10"); - params.add(SpellingParams.SPELLCHECK_MAX_COLLATIONS, "2"); - handler = core.getRequestHandler("spellCheckCompRH"); - rsp = new SolrQueryResponse(); - rsp.add("responseHeader", new SimpleOrderedMap()); - req = new LocalSolrQueryRequest(core, params); - handler.handleRequest(req, rsp); - req.close(); - values = rsp.getValues(); - spellCheck = (NamedList) values.get("spellcheck"); - suggestions = (NamedList) spellCheck.get("suggestions"); - List collations = suggestions.getAll("collation"); - assertTrue(collations.size() == 2); - for (String multipleCollation : collations) { - assertTrue(multipleCollation.equals("lowerfilt:(+faith +hope +love)") - || multipleCollation.equals("lowerfilt:(+faith +hope +loaves)")); - } + //SpellCheckCompRH1 has "lowerfilt1" defined in the "qf" param. It will find "peace" from "peac" because + //requrying field "lowerfilt1" returns the hit. + params.remove(SpellingParams.SPELLCHECK_BUILD); + handler = core.getRequestHandler("spellCheckCompRH1"); + rsp = new SolrQueryResponse(); + rsp.add("responseHeader", new SimpleOrderedMap()); + req = new LocalSolrQueryRequest(core, params); + handler.handleRequest(req, rsp); + req.close(); + values = rsp.getValues(); + spellCheck = (NamedList) values.get("spellcheck"); + suggestions = (NamedList) spellCheck.get("suggestions"); + singleCollation = (String) suggestions.get("collation"); + assertEquals(singleCollation, "peace"); + } - // Testing return multiple collations with expanded collation response - // format. 
- params.add(SpellingParams.SPELLCHECK_COLLATE_EXTENDED_RESULTS, "true"); - handler = core.getRequestHandler("spellCheckCompRH"); - rsp = new SolrQueryResponse(); - rsp.add("responseHeader", new SimpleOrderedMap()); - req = new LocalSolrQueryRequest(core, params); - handler.handleRequest(req, rsp); - req.close(); - values = rsp.getValues(); - spellCheck = (NamedList) values.get("spellcheck"); - suggestions = (NamedList) spellCheck.get("suggestions"); - List expandedCollationList = suggestions.getAll("collation"); - Set usedcollations = new HashSet(); - assertTrue(expandedCollationList.size() == 2); - for (NamedList expandedCollation : expandedCollationList) { - String multipleCollation = (String) expandedCollation.get("collationQuery"); - assertTrue(multipleCollation.equals("lowerfilt:(+faith +hope +love)") - || multipleCollation.equals("lowerfilt:(+faith +hope +loaves)")); - assertTrue(!usedcollations.contains(multipleCollation)); - usedcollations.add(multipleCollation); + @Test + public void testExtendedCollate() throws Exception { + SolrCore core = h.getCore(); + SearchComponent speller = core.getSearchComponent("spellcheck"); + assertTrue("speller is null and it shouldn't be", speller != null); - int hits = (Integer) expandedCollation.get("hits"); - assertTrue(hits == 1); + ModifiableSolrParams params = new ModifiableSolrParams(); + params.add(CommonParams.QT, "spellCheckCompRH"); + params.add(CommonParams.Q, "lowerfilt:(+fauth +home +loane)"); + params.add(SpellingParams.SPELLCHECK_EXTENDED_RESULTS, "true"); + params.add(SpellCheckComponent.COMPONENT_NAME, "true"); + params.add(SpellingParams.SPELLCHECK_BUILD, "true"); + params.add(SpellingParams.SPELLCHECK_COUNT, "10"); + params.add(SpellingParams.SPELLCHECK_COLLATE, "true"); - NamedList misspellingsAndCorrections = (NamedList) expandedCollation.get("misspellingsAndCorrections"); - assertTrue(misspellingsAndCorrections.size() == 3); + // Testing backwards-compatible behavior. + // Returns 1 collation as a single string. + // All words are "correct" per the dictionary, but this collation would + // return no results if tried. 
+ SolrRequestHandler handler = core.getRequestHandler("spellCheckCompRH"); + SolrQueryResponse rsp = new SolrQueryResponse(); + rsp.add("responseHeader", new SimpleOrderedMap()); + SolrQueryRequest req = new LocalSolrQueryRequest(core, params); + handler.handleRequest(req, rsp); + req.close(); + NamedList values = rsp.getValues(); + NamedList spellCheck = (NamedList) values.get("spellcheck"); + NamedList suggestions = (NamedList) spellCheck.get("suggestions"); + String singleCollation = (String) suggestions.get("collation"); + assertEquals("lowerfilt:(+faith +homer +loaves)", singleCollation); - String correctionForFauth = (String) misspellingsAndCorrections.get("fauth"); - String correctionForHome = (String) misspellingsAndCorrections.get("home"); - String correctionForLoane = (String) misspellingsAndCorrections.get("loane"); - assertTrue(correctionForFauth.equals("faith")); - assertTrue(correctionForHome.equals("hope")); - assertTrue(correctionForLoane.equals("love") || correctionForLoane.equals("loaves")); - } - } - - @Test - public void testCollateWithGrouping() throws Exception - { - SolrCore core = h.getCore(); - SearchComponent speller = core.getSearchComponent("spellcheck"); - assertTrue("speller is null and it shouldn't be", speller != null); - - ModifiableSolrParams params = new ModifiableSolrParams(); - params.add(SpellCheckComponent.COMPONENT_NAME, "true"); - params.add(SpellingParams.SPELLCHECK_BUILD, "true"); - params.add(SpellingParams.SPELLCHECK_COUNT, "10"); - params.add(SpellingParams.SPELLCHECK_COLLATE, "true"); - params.add(SpellingParams.SPELLCHECK_MAX_COLLATION_TRIES, "5"); - params.add(SpellingParams.SPELLCHECK_MAX_COLLATIONS, "1"); - params.add(CommonParams.Q, "lowerfilt:(+fauth)"); - params.add(GroupParams.GROUP, "true"); - params.add(GroupParams.GROUP_FIELD, "id"); - - //Because a FilterQuery is applied which removes doc id#1 from possible hits, we would - //not want the collations to return us "lowerfilt:(+faith +hope +loaves)" as this only matches doc id#1. 
- SolrRequestHandler handler = core.getRequestHandler("spellCheckCompRH"); - SolrQueryResponse rsp = new SolrQueryResponse(); - rsp.add("responseHeader", new SimpleOrderedMap()); - SolrQueryRequest req = new LocalSolrQueryRequest(core, params); - handler.handleRequest(req, rsp); - req.close(); - NamedList values = rsp.getValues(); - NamedList spellCheck = (NamedList) values.get("spellcheck"); - NamedList suggestions = (NamedList) spellCheck.get("suggestions"); - List collations = suggestions.getAll("collation"); - assertTrue(collations.size() == 1); - } - - @Test - public void testContextSensitiveCollate() throws Exception { - // DirectSolrSpellChecker IndexBasedSpellChecker - String[] dictionary = {"direct", "default_teststop" }; - for(int i=0 ; i<1 ; i++) { - assertQ( - req( - "q", "teststop:(flew AND form AND heathrow)", - "qt", "spellCheckCompRH", - "indent", "true", - SpellCheckComponent.COMPONENT_NAME, "true", - SpellCheckComponent.SPELLCHECK_DICT, dictionary[i], - SpellCheckComponent.SPELLCHECK_EXTENDED_RESULTS, "true", - SpellCheckComponent.SPELLCHECK_COUNT, "10", - SpellCheckComponent.SPELLCHECK_ALTERNATIVE_TERM_COUNT, "5", - SpellCheckComponent.SPELLCHECK_MAX_RESULTS_FOR_SUGGEST, "0", - SpellCheckComponent.SPELLCHECK_COLLATE, "true", - SpellCheckComponent.SPELLCHECK_MAX_COLLATION_TRIES, "10", - SpellCheckComponent.SPELLCHECK_MAX_COLLATIONS, "1", - SpellCheckComponent.SPELLCHECK_COLLATE_EXTENDED_RESULTS, "true" - ), - "//lst[@name='spellcheck']/lst[@name='suggestions']/lst[@name='flew']/arr[@name='suggestion']/lst/str[@name='word']='flow'", - "//lst[@name='spellcheck']/lst[@name='suggestions']/lst[@name='form']/arr[@name='suggestion']/lst/str[@name='word']='from'", + // Testing backwards-compatible response format but will only return a + // collation that would return results. + params.remove(SpellingParams.SPELLCHECK_BUILD); + params.add(SpellingParams.SPELLCHECK_MAX_COLLATION_TRIES, "5"); + params.add(SpellingParams.SPELLCHECK_MAX_COLLATIONS, "1"); + handler = core.getRequestHandler("spellCheckCompRH"); + rsp = new SolrQueryResponse(); + rsp.add("responseHeader", new SimpleOrderedMap()); + req = new LocalSolrQueryRequest(core, params); + handler.handleRequest(req, rsp); + req.close(); + values = rsp.getValues(); + spellCheck = (NamedList) values.get("spellcheck"); + suggestions = (NamedList) spellCheck.get("suggestions"); + singleCollation = (String) suggestions.get("collation"); + assertEquals("lowerfilt:(+faith +hope +loaves)", singleCollation); + + // Testing returning multiple collations if more than one valid + // combination exists. 
+ params.remove(SpellingParams.SPELLCHECK_MAX_COLLATION_TRIES); + params.remove(SpellingParams.SPELLCHECK_MAX_COLLATIONS); + params.add(SpellingParams.SPELLCHECK_MAX_COLLATION_TRIES, "10"); + params.add(SpellingParams.SPELLCHECK_MAX_COLLATIONS, "2"); + handler = core.getRequestHandler("spellCheckCompRH"); + rsp = new SolrQueryResponse(); + rsp.add("responseHeader", new SimpleOrderedMap()); + req = new LocalSolrQueryRequest(core, params); + handler.handleRequest(req, rsp); + req.close(); + values = rsp.getValues(); + spellCheck = (NamedList) values.get("spellcheck"); + suggestions = (NamedList) spellCheck.get("suggestions"); + List collations = suggestions.getAll("collation"); + assertTrue(collations.size() == 2); + for (String multipleCollation : collations) { + assertTrue(multipleCollation.equals("lowerfilt:(+faith +hope +love)") + || multipleCollation.equals("lowerfilt:(+faith +hope +loaves)")); + } + + // Testing return multiple collations with expanded collation response + // format. + params.add(SpellingParams.SPELLCHECK_COLLATE_EXTENDED_RESULTS, "true"); + handler = core.getRequestHandler("spellCheckCompRH"); + rsp = new SolrQueryResponse(); + rsp.add("responseHeader", new SimpleOrderedMap()); + req = new LocalSolrQueryRequest(core, params); + handler.handleRequest(req, rsp); + req.close(); + values = rsp.getValues(); + spellCheck = (NamedList) values.get("spellcheck"); + suggestions = (NamedList) spellCheck.get("suggestions"); + List expandedCollationList = suggestions.getAll("collation"); + Set usedcollations = new HashSet(); + assertTrue(expandedCollationList.size() == 2); + for (NamedList expandedCollation : expandedCollationList) { + String multipleCollation = (String) expandedCollation.get("collationQuery"); + assertTrue(multipleCollation.equals("lowerfilt:(+faith +hope +love)") + || multipleCollation.equals("lowerfilt:(+faith +hope +loaves)")); + assertTrue(!usedcollations.contains(multipleCollation)); + usedcollations.add(multipleCollation); + + int hits = (Integer) expandedCollation.get("hits"); + assertTrue(hits == 1); + + NamedList misspellingsAndCorrections = (NamedList) expandedCollation.get("misspellingsAndCorrections"); + assertTrue(misspellingsAndCorrections.size() == 3); + + String correctionForFauth = (String) misspellingsAndCorrections.get("fauth"); + String correctionForHome = (String) misspellingsAndCorrections.get("home"); + String correctionForLoane = (String) misspellingsAndCorrections.get("loane"); + assertTrue(correctionForFauth.equals("faith")); + assertTrue(correctionForHome.equals("hope")); + assertTrue(correctionForLoane.equals("love") || correctionForLoane.equals("loaves")); + } + } + + @Test + public void testCollateWithGrouping() throws Exception + { + SolrCore core = h.getCore(); + SearchComponent speller = core.getSearchComponent("spellcheck"); + assertTrue("speller is null and it shouldn't be", speller != null); + + ModifiableSolrParams params = new ModifiableSolrParams(); + params.add(SpellCheckComponent.COMPONENT_NAME, "true"); + params.add(SpellingParams.SPELLCHECK_BUILD, "true"); + params.add(SpellingParams.SPELLCHECK_COUNT, "10"); + params.add(SpellingParams.SPELLCHECK_COLLATE, "true"); + params.add(SpellingParams.SPELLCHECK_MAX_COLLATION_TRIES, "5"); + params.add(SpellingParams.SPELLCHECK_MAX_COLLATIONS, "1"); + params.add(CommonParams.Q, "lowerfilt:(+fauth)"); + params.add(GroupParams.GROUP, "true"); + params.add(GroupParams.GROUP_FIELD, "id"); + + //Because a FilterQuery is applied which removes doc id#1 from possible hits, we would + //not 
want the collations to return us "lowerfilt:(+faith +hope +loaves)" as this only matches doc id#1. + SolrRequestHandler handler = core.getRequestHandler("spellCheckCompRH"); + SolrQueryResponse rsp = new SolrQueryResponse(); + rsp.add("responseHeader", new SimpleOrderedMap()); + SolrQueryRequest req = new LocalSolrQueryRequest(core, params); + handler.handleRequest(req, rsp); + req.close(); + NamedList values = rsp.getValues(); + NamedList spellCheck = (NamedList) values.get("spellcheck"); + NamedList suggestions = (NamedList) spellCheck.get("suggestions"); + List collations = suggestions.getAll("collation"); + assertTrue(collations.size() == 1); + } + + @Test + public void testContextSensitiveCollate() throws Exception { + // DirectSolrSpellChecker IndexBasedSpellChecker + String[] dictionary = {"direct", "default_teststop" }; + for(int i=0 ; i<1 ; i++) { + assertQ( + req( + "q", "teststop:(flew AND form AND heathrow)", + "qt", "spellCheckCompRH", + "indent", "true", + SpellCheckComponent.COMPONENT_NAME, "true", + SpellCheckComponent.SPELLCHECK_DICT, dictionary[i], + SpellCheckComponent.SPELLCHECK_EXTENDED_RESULTS, "true", + SpellCheckComponent.SPELLCHECK_COUNT, "10", + SpellCheckComponent.SPELLCHECK_ALTERNATIVE_TERM_COUNT, "5", + SpellCheckComponent.SPELLCHECK_MAX_RESULTS_FOR_SUGGEST, "0", + SpellCheckComponent.SPELLCHECK_COLLATE, "true", + SpellCheckComponent.SPELLCHECK_MAX_COLLATION_TRIES, "10", + SpellCheckComponent.SPELLCHECK_MAX_COLLATIONS, "1", + SpellCheckComponent.SPELLCHECK_COLLATE_EXTENDED_RESULTS, "true" + ), + "//lst[@name='spellcheck']/lst[@name='suggestions']/lst[@name='flew']/arr[@name='suggestion']/lst/str[@name='word']='flow'", + "//lst[@name='spellcheck']/lst[@name='suggestions']/lst[@name='form']/arr[@name='suggestion']/lst/str[@name='word']='from'", /* DirectSolrSpellChecker won't suggest if the edit distance > 2, so we can't test for this one... 
- "//lst[@name='spellcheck']/lst[@name='suggestions']/lst[@name='heathrow']/arr[@name='suggestion']/lst/str[@name='word']='hearth'", + "//lst[@name='spellcheck']/lst[@name='suggestions']/lst[@name='heathrow']/arr[@name='suggestion']/lst/str[@name='word']='hearth'", */ - "//lst[@name='spellcheck']/lst[@name='suggestions']/bool[@name='correctlySpelled']='false'", - "//lst[@name='spellcheck']/lst[@name='suggestions']/lst[@name='collation']/str[@name='collationQuery']='teststop:(flew AND from AND heathrow)'", - "//lst[@name='spellcheck']/lst[@name='suggestions']/lst[@name='collation']/int[@name='hits']=1", - "//lst[@name='spellcheck']/lst[@name='suggestions']/lst[@name='collation']/lst[@name='misspellingsAndCorrections']/str[@name='form']='from'" - ); - - assertQ( - req( - "q", "teststop:(june AND customs)", - "qt", "spellCheckCompRH", - "indent", "true", - SpellCheckComponent.COMPONENT_NAME, "true", - SpellCheckComponent.SPELLCHECK_DICT, dictionary[i], - SpellCheckComponent.SPELLCHECK_EXTENDED_RESULTS, "true", - SpellCheckComponent.SPELLCHECK_COUNT, "10", - SpellCheckComponent.SPELLCHECK_ALTERNATIVE_TERM_COUNT, "5", - SpellCheckComponent.SPELLCHECK_MAX_RESULTS_FOR_SUGGEST, "1", - SpellCheckComponent.SPELLCHECK_COLLATE, "true", - SpellCheckComponent.SPELLCHECK_MAX_COLLATION_TRIES, "10", - SpellCheckComponent.SPELLCHECK_MAX_COLLATIONS, "1", - SpellCheckComponent.SPELLCHECK_COLLATE_EXTENDED_RESULTS, "true" - ), - "//result[@numFound=1]", - "//lst[@name='spellcheck']/lst[@name='suggestions']/lst[@name='june']/arr[@name='suggestion']/lst/str[@name='word']='jane'", - "//lst[@name='spellcheck']/lst[@name='suggestions']/bool[@name='correctlySpelled']='false'", - "//lst[@name='spellcheck']/lst[@name='suggestions']/lst[@name='collation']/str[@name='collationQuery']='teststop:(jane AND customs)'", - "//lst[@name='spellcheck']/lst[@name='suggestions']/lst[@name='collation']/int[@name='hits']=1", - "//lst[@name='spellcheck']/lst[@name='suggestions']/lst[@name='collation']/lst[@name='misspellingsAndCorrections']/str[@name='june']='jane'" - ); - } - } + "//lst[@name='spellcheck']/lst[@name='suggestions']/bool[@name='correctlySpelled']='false'", + "//lst[@name='spellcheck']/lst[@name='suggestions']/lst[@name='collation']/str[@name='collationQuery']='teststop:(flew AND from AND heathrow)'", + "//lst[@name='spellcheck']/lst[@name='suggestions']/lst[@name='collation']/int[@name='hits']=1", + "//lst[@name='spellcheck']/lst[@name='suggestions']/lst[@name='collation']/lst[@name='misspellingsAndCorrections']/str[@name='form']='from'" + ); + + assertQ( + req( + "q", "teststop:(june AND customs)", + "qt", "spellCheckCompRH", + "indent", "true", + SpellCheckComponent.COMPONENT_NAME, "true", + SpellCheckComponent.SPELLCHECK_DICT, dictionary[i], + SpellCheckComponent.SPELLCHECK_EXTENDED_RESULTS, "true", + SpellCheckComponent.SPELLCHECK_COUNT, "10", + SpellCheckComponent.SPELLCHECK_ALTERNATIVE_TERM_COUNT, "5", + SpellCheckComponent.SPELLCHECK_MAX_RESULTS_FOR_SUGGEST, "1", + SpellCheckComponent.SPELLCHECK_COLLATE, "true", + SpellCheckComponent.SPELLCHECK_MAX_COLLATION_TRIES, "10", + SpellCheckComponent.SPELLCHECK_MAX_COLLATIONS, "1", + SpellCheckComponent.SPELLCHECK_COLLATE_EXTENDED_RESULTS, "true" + ), + "//result[@numFound=1]", + "//lst[@name='spellcheck']/lst[@name='suggestions']/lst[@name='june']/arr[@name='suggestion']/lst/str[@name='word']='jane'", + "//lst[@name='spellcheck']/lst[@name='suggestions']/bool[@name='correctlySpelled']='false'", + 
"//lst[@name='spellcheck']/lst[@name='suggestions']/lst[@name='collation']/str[@name='collationQuery']='teststop:(jane AND customs)'", + "//lst[@name='spellcheck']/lst[@name='suggestions']/lst[@name='collation']/int[@name='hits']=1", + "//lst[@name='spellcheck']/lst[@name='suggestions']/lst[@name='collation']/lst[@name='misspellingsAndCorrections']/str[@name='june']='jane'" + ); + } + } } Index: solr/core/src/test/org/apache/solr/handler/XsltUpdateRequestHandlerTest.java =================================================================== --- solr/core/src/test/org/apache/solr/handler/XsltUpdateRequestHandlerTest.java (revision 1381159) +++ solr/core/src/test/org/apache/solr/handler/XsltUpdateRequestHandlerTest.java (working copy) @@ -64,29 +64,29 @@ " " + ""; - Map args = new HashMap(); - args.put(CommonParams.TR, "xsl-update-handler-test.xsl"); + Map args = new HashMap(); + args.put(CommonParams.TR, "xsl-update-handler-test.xsl"); - SolrCore core = h.getCore(); - LocalSolrQueryRequest req = new LocalSolrQueryRequest( core, new MapSolrParams( args) ); - ArrayList streams = new ArrayList(); - streams.add(new ContentStreamBase.StringStream(xml)); - req.setContentStreams(streams); - SolrQueryResponse rsp = new SolrQueryResponse(); - UpdateRequestHandler handler = new UpdateRequestHandler(); - handler.init(new NamedList()); - handler.handleRequestBody(req, rsp); - StringWriter sw = new StringWriter(32000); - QueryResponseWriter responseWriter = core.getQueryResponseWriter(req); - responseWriter.write(sw,req,rsp); - req.close(); - String response = sw.toString(); - assertU(response); + SolrCore core = h.getCore(); + LocalSolrQueryRequest req = new LocalSolrQueryRequest( core, new MapSolrParams( args) ); + ArrayList streams = new ArrayList(); + streams.add(new ContentStreamBase.StringStream(xml)); + req.setContentStreams(streams); + SolrQueryResponse rsp = new SolrQueryResponse(); + UpdateRequestHandler handler = new UpdateRequestHandler(); + handler.init(new NamedList()); + handler.handleRequestBody(req, rsp); + StringWriter sw = new StringWriter(32000); + QueryResponseWriter responseWriter = core.getQueryResponseWriter(req); + responseWriter.write(sw,req,rsp); + req.close(); + String response = sw.toString(); + assertU(response); assertU(commit()); assertQ("test document was correctly committed", req("q","*:*") , "//result[@numFound='1']" , "//int[@name='id'][.='12345']" - ); + ); } } Index: solr/core/src/test/org/apache/solr/handler/XmlUpdateRequestHandlerTest.java =================================================================== --- solr/core/src/test/org/apache/solr/handler/XmlUpdateRequestHandlerTest.java (revision 1381159) +++ solr/core/src/test/org/apache/solr/handler/XmlUpdateRequestHandlerTest.java (working copy) @@ -112,65 +112,65 @@ @Test public void testReadDelete() throws Exception { - String xml = - "" + - " " + - " id:150" + - " 150" + - " 200" + - " id:200" + - " " + - " " + - " id:150" + - " " + - " " + - " 150" + - " " + - ""; - - MockUpdateRequestProcessor p = new MockUpdateRequestProcessor(null); - p.expectDelete(null, "id:150", -1); - p.expectDelete("150", null, -1); - p.expectDelete("200", null, -1); - p.expectDelete(null, "id:200", -1); - p.expectDelete(null, "id:150", 500); - p.expectDelete("150", null, -1); + String xml = + "" + + " " + + " id:150" + + " 150" + + " 200" + + " id:200" + + " " + + " " + + " id:150" + + " " + + " " + + " 150" + + " " + + ""; - XMLLoader loader = new XMLLoader().init(null); - loader.load(req(), new SolrQueryResponse(), new 
-
-    p.assertNoCommandsPending();
-  }
-
-  private class MockUpdateRequestProcessor extends UpdateRequestProcessor {
-
-    private Queue deleteCommands = new LinkedList();
-
-    public MockUpdateRequestProcessor(UpdateRequestProcessor next) {
-      super(next);
-    }
-
-    public void expectDelete(String id, String query, int commitWithin) {
-      DeleteUpdateCommand cmd = new DeleteUpdateCommand(null);
-      cmd.id = id;
-      cmd.query = query;
-      cmd.commitWithin = commitWithin;
-      deleteCommands.add(cmd);
-    }
-
-    public void assertNoCommandsPending() {
-      assertTrue(deleteCommands.isEmpty());
-    }
-
-    @Override
-    public void processDelete(DeleteUpdateCommand cmd) throws IOException {
-      DeleteUpdateCommand expected = deleteCommands.poll();
-      assertNotNull("Unexpected delete command: [" + cmd + "]", expected);
-      assertTrue("Expected [" + expected + "] but found [" + cmd + "]",
-          ObjectUtils.equals(expected.id, cmd.id) &&
-          ObjectUtils.equals(expected.query, cmd.query) &&
-          expected.commitWithin==cmd.commitWithin);
-    }
-  }
+    MockUpdateRequestProcessor p = new MockUpdateRequestProcessor(null);
+    p.expectDelete(null, "id:150", -1);
+    p.expectDelete("150", null, -1);
+    p.expectDelete("200", null, -1);
+    p.expectDelete(null, "id:200", -1);
+    p.expectDelete(null, "id:150", 500);
+    p.expectDelete("150", null, -1);
+    XMLLoader loader = new XMLLoader().init(null);
+    loader.load(req(), new SolrQueryResponse(), new ContentStreamBase.StringStream(xml), p);
+
+    p.assertNoCommandsPending();
+  }
+
+  private class MockUpdateRequestProcessor extends UpdateRequestProcessor {
+
+    private Queue deleteCommands = new LinkedList();
+
+    public MockUpdateRequestProcessor(UpdateRequestProcessor next) {
+      super(next);
+    }
+
+    public void expectDelete(String id, String query, int commitWithin) {
+      DeleteUpdateCommand cmd = new DeleteUpdateCommand(null);
+      cmd.id = id;
+      cmd.query = query;
+      cmd.commitWithin = commitWithin;
+      deleteCommands.add(cmd);
+    }
+
+    public void assertNoCommandsPending() {
+      assertTrue(deleteCommands.isEmpty());
+    }
+
+    @Override
+    public void processDelete(DeleteUpdateCommand cmd) throws IOException {
+      DeleteUpdateCommand expected = deleteCommands.poll();
+      assertNotNull("Unexpected delete command: [" + cmd + "]", expected);
+      assertTrue("Expected [" + expected + "] but found [" + cmd + "]",
+          ObjectUtils.equals(expected.id, cmd.id) &&
+          ObjectUtils.equals(expected.query, cmd.query) &&
+          expected.commitWithin==cmd.commitWithin);
+    }
+  }
+
 }
Index: solr/core/src/test/org/apache/solr/handler/component/SpellCheckComponentTest.java
===================================================================
--- solr/core/src/test/org/apache/solr/handler/component/SpellCheckComponentTest.java (revision 1381159)
+++ solr/core/src/test/org/apache/solr/handler/component/SpellCheckComponentTest.java (working copy)
@@ -206,55 +206,55 @@
   @Test
   public void testThresholdTokenFrequency() throws Exception {
-
-    //"document" is in 2 documents but "another" is only in 1.
-    //So with a threshold of 29%, "another" is absent from the dictionary
-    //while "document" is present.
-
-    assertJQ(req("qt",rh, SpellCheckComponent.COMPONENT_NAME, "true", "q","documenq", SpellingParams.SPELLCHECK_DICT, "threshold", SpellingParams.SPELLCHECK_COUNT,"5", SpellingParams.SPELLCHECK_EXTENDED_RESULTS,"true")
-        ,"/spellcheck/suggestions/[1]/suggestion==[{'word':'document','freq':2}]"
-    );
-
-    assertJQ(req("qt",rh, SpellCheckComponent.COMPONENT_NAME, "true", "q","documenq", SpellingParams.SPELLCHECK_DICT, "threshold_direct", SpellingParams.SPELLCHECK_COUNT,"5", SpellingParams.SPELLCHECK_EXTENDED_RESULTS,"true")
-        ,"/spellcheck/suggestions/[1]/suggestion==[{'word':'document','freq':2}]"
-    );
-
-    //TODO: how do we make this into a 1-liner using "assertQ()" ???
-    SolrCore core = h.getCore();
-    SearchComponent speller = core.getSearchComponent("spellcheck");
-    assertTrue("speller is null and it shouldn't be", speller != null);
-
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.add(SpellCheckComponent.COMPONENT_NAME, "true");
-    params.add(SpellingParams.SPELLCHECK_COUNT, "10");
-    params.add(SpellingParams.SPELLCHECK_DICT, "threshold");
-    params.add(SpellingParams.SPELLCHECK_EXTENDED_RESULTS,"true");
-    params.add(CommonParams.Q, "anotheq");
-
-    SolrRequestHandler handler = core.getRequestHandler("spellCheckCompRH");
-    SolrQueryResponse rsp = new SolrQueryResponse();
-    rsp.add("responseHeader", new SimpleOrderedMap());
-    SolrQueryRequest req = new LocalSolrQueryRequest(core, params);
-    handler.handleRequest(req, rsp);
-    req.close();
-    NamedList values = rsp.getValues();
-    NamedList spellCheck = (NamedList) values.get("spellcheck");
-    NamedList suggestions = (NamedList) spellCheck.get("suggestions");
-    assertTrue(suggestions.get("suggestion")==null);
-    assertTrue((Boolean) suggestions.get("correctlySpelled")==false);
-
-    params.remove(SpellingParams.SPELLCHECK_DICT);
-    params.add(SpellingParams.SPELLCHECK_DICT, "threshold_direct");
-    rsp = new SolrQueryResponse();
-    rsp.add("responseHeader", new SimpleOrderedMap());
-    req = new LocalSolrQueryRequest(core, params);
-    handler.handleRequest(req, rsp);
-    req.close();
-    values = rsp.getValues();
-    spellCheck = (NamedList) values.get("spellcheck");
-    suggestions = (NamedList) spellCheck.get("suggestions");
-    assertTrue(suggestions.get("suggestion")==null);
-
-    assertTrue((Boolean) suggestions.get("correctlySpelled")==false);
+
+    //"document" is in 2 documents but "another" is only in 1.
+    //So with a threshold of 29%, "another" is absent from the dictionary
+    //while "document" is present.
+
+    assertJQ(req("qt",rh, SpellCheckComponent.COMPONENT_NAME, "true", "q","documenq", SpellingParams.SPELLCHECK_DICT, "threshold", SpellingParams.SPELLCHECK_COUNT,"5", SpellingParams.SPELLCHECK_EXTENDED_RESULTS,"true")
+        ,"/spellcheck/suggestions/[1]/suggestion==[{'word':'document','freq':2}]"
+    );
+
+    assertJQ(req("qt",rh, SpellCheckComponent.COMPONENT_NAME, "true", "q","documenq", SpellingParams.SPELLCHECK_DICT, "threshold_direct", SpellingParams.SPELLCHECK_COUNT,"5", SpellingParams.SPELLCHECK_EXTENDED_RESULTS,"true")
+        ,"/spellcheck/suggestions/[1]/suggestion==[{'word':'document','freq':2}]"
+    );
+
+    //TODO: how do we make this into a 1-liner using "assertQ()" ???
+    SolrCore core = h.getCore();
+    SearchComponent speller = core.getSearchComponent("spellcheck");
+    assertTrue("speller is null and it shouldn't be", speller != null);
+
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.add(SpellCheckComponent.COMPONENT_NAME, "true");
+    params.add(SpellingParams.SPELLCHECK_COUNT, "10");
+    params.add(SpellingParams.SPELLCHECK_DICT, "threshold");
+    params.add(SpellingParams.SPELLCHECK_EXTENDED_RESULTS,"true");
+    params.add(CommonParams.Q, "anotheq");
+
+    SolrRequestHandler handler = core.getRequestHandler("spellCheckCompRH");
+    SolrQueryResponse rsp = new SolrQueryResponse();
+    rsp.add("responseHeader", new SimpleOrderedMap());
+    SolrQueryRequest req = new LocalSolrQueryRequest(core, params);
+    handler.handleRequest(req, rsp);
+    req.close();
+    NamedList values = rsp.getValues();
+    NamedList spellCheck = (NamedList) values.get("spellcheck");
+    NamedList suggestions = (NamedList) spellCheck.get("suggestions");
+    assertTrue(suggestions.get("suggestion")==null);
+    assertTrue((Boolean) suggestions.get("correctlySpelled")==false);
+
+    params.remove(SpellingParams.SPELLCHECK_DICT);
+    params.add(SpellingParams.SPELLCHECK_DICT, "threshold_direct");
+    rsp = new SolrQueryResponse();
+    rsp.add("responseHeader", new SimpleOrderedMap());
+    req = new LocalSolrQueryRequest(core, params);
+    handler.handleRequest(req, rsp);
+    req.close();
+    values = rsp.getValues();
+    spellCheck = (NamedList) values.get("spellcheck");
+    suggestions = (NamedList) spellCheck.get("suggestions");
+    assertTrue(suggestions.get("suggestion")==null);
+
+    assertTrue((Boolean) suggestions.get("correctlySpelled")==false);
   }
 }
Index: solr/core/src/test/org/apache/solr/handler/component/StatsComponentTest.java
===================================================================
--- solr/core/src/test/org/apache/solr/handler/component/StatsComponentTest.java (revision 1381159)
+++ solr/core/src/test/org/apache/solr/handler/component/StatsComponentTest.java (working copy)
@@ -261,34 +261,34 @@
   }

   public void doTestFacetStatisticsMissingResult(String f) throws Exception {
-    assertU(adoc("id", "1", f, "10", "active_s", "true"));
-    assertU(adoc("id", "2", f, "20", "active_s", "true"));
-    assertU(adoc("id", "3", "active_s", "false"));
-    assertU(adoc("id", "4", f, "40", "active_s", "false"));
-    assertU(commit());
+    assertU(adoc("id", "1", f, "10", "active_s", "true"));
+    assertU(adoc("id", "2", f, "20", "active_s", "true"));
+    assertU(adoc("id", "3", "active_s", "false"));
+    assertU(adoc("id", "4", f, "40", "active_s", "false"));
+    assertU(commit());
-    assertQ("test value for active_s=true", req("q","*:*", "stats","true", "stats.field",f, "stats.facet","active_s")
-    , "//lst[@name='true']/double[@name='min'][.='10.0']"
-    , "//lst[@name='true']/double[@name='max'][.='20.0']"
-    , "//lst[@name='true']/double[@name='sum'][.='30.0']"
-    , "//lst[@name='true']/long[@name='count'][.='2']"
-    , "//lst[@name='true']/long[@name='missing'][.='0']"
-    , "//lst[@name='true']/double[@name='sumOfSquares'][.='500.0']"
-    , "//lst[@name='true']/double[@name='mean'][.='15.0']"
-    , "//lst[@name='true']/double[@name='stddev'][.='7.0710678118654755']"
-    );
+    assertQ("test value for active_s=true", req("q","*:*", "stats","true", "stats.field",f, "stats.facet","active_s")
+    , "//lst[@name='true']/double[@name='min'][.='10.0']"
+    , "//lst[@name='true']/double[@name='max'][.='20.0']"
+    , "//lst[@name='true']/double[@name='sum'][.='30.0']"
+    , "//lst[@name='true']/long[@name='count'][.='2']"
+    , "//lst[@name='true']/long[@name='missing'][.='0']"
"//lst[@name='true']/long[@name='missing'][.='0']" + , "//lst[@name='true']/double[@name='sumOfSquares'][.='500.0']" + , "//lst[@name='true']/double[@name='mean'][.='15.0']" + , "//lst[@name='true']/double[@name='stddev'][.='7.0710678118654755']" + ); - assertQ("test value for active_s=false", req("q","*:*", "stats","true", "stats.field",f, "stats.facet","active_s") - , "//lst[@name='false']/double[@name='min'][.='40.0']" - , "//lst[@name='false']/double[@name='max'][.='40.0']" - , "//lst[@name='false']/double[@name='sum'][.='40.0']" - , "//lst[@name='false']/long[@name='count'][.='1']" - , "//lst[@name='false']/long[@name='missing'][.='1']" - , "//lst[@name='false']/double[@name='sumOfSquares'][.='1600.0']" - , "//lst[@name='false']/double[@name='mean'][.='40.0']" - , "//lst[@name='false']/double[@name='stddev'][.='0.0']" - ); - } + assertQ("test value for active_s=false", req("q","*:*", "stats","true", "stats.field",f, "stats.facet","active_s") + , "//lst[@name='false']/double[@name='min'][.='40.0']" + , "//lst[@name='false']/double[@name='max'][.='40.0']" + , "//lst[@name='false']/double[@name='sum'][.='40.0']" + , "//lst[@name='false']/long[@name='count'][.='1']" + , "//lst[@name='false']/long[@name='missing'][.='1']" + , "//lst[@name='false']/double[@name='sumOfSquares'][.='1600.0']" + , "//lst[@name='false']/double[@name='mean'][.='40.0']" + , "//lst[@name='false']/double[@name='stddev'][.='0.0']" + ); + } public void testFieldStatisticsResultsNumericFieldAlwaysMissing() throws Exception { SolrCore core = h.getCore(); Index: solr/core/src/test/org/apache/solr/handler/component/DistributedSpellCheckComponentTest.java =================================================================== --- solr/core/src/test/org/apache/solr/handler/component/DistributedSpellCheckComponentTest.java (revision 1381159) +++ solr/core/src/test/org/apache/solr/handler/component/DistributedSpellCheckComponentTest.java (working copy) @@ -42,12 +42,12 @@ private String requestHandlerName; private String reqHandlerWithWordbreak; - public DistributedSpellCheckComponentTest() - { - //fixShardCount=true; - //shardCount=2; - //stress=0; - } + public DistributedSpellCheckComponentTest() + { + //fixShardCount=true; + //shardCount=2; + //stress=0; + } @BeforeClass public static void beforeClass() throws Exception { @@ -100,7 +100,7 @@ @Override public void doTest() throws Exception { - del("*:*"); + del("*:*"); index(id, "1", "lowerfilt", "toyota"); index(id, "2", "lowerfilt", "chevrolet"); index(id, "3", "lowerfilt", "suzuki"); Index: solr/core/src/test/org/apache/solr/TestDistributedGrouping.java =================================================================== --- solr/core/src/test/org/apache/solr/TestDistributedGrouping.java (revision 1381159) +++ solr/core/src/test/org/apache/solr/TestDistributedGrouping.java (working copy) @@ -140,7 +140,7 @@ commit(); - // test grouping + // test grouping // The second sort = id asc . The sorting behaviour is different in dist mode. 
See TopDocs#merge // The shard the result came from matters in the order if both document sortvalues are equal query("q", "*:*", "rows", 100, "fl", "id," + i1, "group", "true", "group.field", i1, "group.limit", 10, "sort", i1 + " asc, id asc"); Index: solr/core/src/test/org/apache/solr/update/UpdateParamsTest.java =================================================================== --- solr/core/src/test/org/apache/solr/update/UpdateParamsTest.java (revision 1381159) +++ solr/core/src/test/org/apache/solr/update/UpdateParamsTest.java (working copy) @@ -54,8 +54,8 @@ // First check that the old param behaves as it should try { - handler.handleRequestBody(req, rsp); - assertTrue("Old param update.processor should not have any effect anymore", true); + handler.handleRequestBody(req, rsp); + assertTrue("Old param update.processor should not have any effect anymore", true); } catch (Exception e) { assertFalse("Got wrong exception while testing update.chain", e.getMessage().equals("unknown UpdateRequestProcessorChain: nonexistant")); } @@ -65,10 +65,10 @@ params.getMap().put(UpdateParams.UPDATE_CHAIN, "nonexistant"); req.setParams(params); try { - handler.handleRequestBody(req, rsp); - assertFalse("Faulty update.chain parameter not causing an error - i.e. it is not detected", true); + handler.handleRequestBody(req, rsp); + assertFalse("Faulty update.chain parameter not causing an error - i.e. it is not detected", true); } catch (Exception e) { - assertEquals("Got wrong exception while testing update.chain", e.getMessage(), "unknown UpdateRequestProcessorChain: nonexistant"); + assertEquals("Got wrong exception while testing update.chain", e.getMessage(), "unknown UpdateRequestProcessorChain: nonexistant"); } } Index: solr/core/src/test/org/apache/solr/highlight/HighlighterConfigTest.java =================================================================== --- solr/core/src/test/org/apache/solr/highlight/HighlighterConfigTest.java (revision 1381159) +++ solr/core/src/test/org/apache/solr/highlight/HighlighterConfigTest.java (working copy) @@ -23,47 +23,47 @@ import java.util.HashMap; public class HighlighterConfigTest extends AbstractSolrTestCase { - @Override public String getSchemaFile() { return "schema.xml"; } - // the default case (i.e. without a class attribute) is tested every time sorlconfig.xml is used - @Override public String getSolrConfigFile() { return "solrconfig-highlight.xml"; } + @Override public String getSchemaFile() { return "schema.xml"; } + // the default case (i.e. 
without a class attribute) is tested every time solrconfig.xml is used + @Override public String getSolrConfigFile() { return "solrconfig-highlight.xml"; } - @Override - public void setUp() throws Exception { - // if you override setUp or tearDown, you better call - // the super classes version - super.setUp(); - } - - @Override - public void tearDown() throws Exception { - // if you override setUp or tearDown, you better call - // the super classes version - super.tearDown(); - } - - public void testConfig() - { + @Override + public void setUp() throws Exception { + // if you override setUp or tearDown, you better call + // the super classes version + super.setUp(); + } + + @Override + public void tearDown() throws Exception { + // if you override setUp or tearDown, you better call + // the super classes version + super.tearDown(); + } + + public void testConfig() + { SolrHighlighter highlighter = HighlightComponent.getHighlighter(h.getCore()); - log.info( "highlighter" ); + log.info( "highlighter" ); - assertTrue( highlighter instanceof DummyHighlighter ); - - // check to see that doHighlight is called from the DummyHighlighter - HashMap args = new HashMap(); - args.put("hl", "true"); - args.put("df", "t_text"); - args.put("hl.fl", ""); - TestHarness.LocalRequestFactory sumLRF = h.getRequestFactory( - "standard", 0, 200, args); - - assertU(adoc("t_text", "a long day's night", "id", "1")); - assertU(commit()); - assertU(optimize()); - assertQ("Basic summarization", - sumLRF.makeRequest("long"), - "//lst[@name='highlighting']/str[@name='dummy']" - ); - } + assertTrue( highlighter instanceof DummyHighlighter ); + + // check to see that doHighlight is called from the DummyHighlighter + HashMap args = new HashMap(); + args.put("hl", "true"); + args.put("df", "t_text"); + args.put("hl.fl", ""); + TestHarness.LocalRequestFactory sumLRF = h.getRequestFactory( + "standard", 0, 200, args); + + assertU(adoc("t_text", "a long day's night", "id", "1")); + assertU(commit()); + assertU(optimize()); + assertQ("Basic summarization", + sumLRF.makeRequest("long"), + "//lst[@name='highlighting']/str[@name='dummy']" + ); + } } Index: solr/core/src/test/org/apache/solr/highlight/DummyHighlighter.java =================================================================== --- solr/core/src/test/org/apache/solr/highlight/DummyHighlighter.java (revision 1381159) +++ solr/core/src/test/org/apache/solr/highlight/DummyHighlighter.java (working copy) @@ -27,17 +27,17 @@ public class DummyHighlighter extends SolrHighlighter { - @Override - public NamedList doHighlighting(DocList docs, Query query, - SolrQueryRequest req, String[] defaultFields) throws IOException { - NamedList fragments = new SimpleOrderedMap(); - fragments.add("dummy", "thing1"); - return fragments; - } + @Override + public NamedList doHighlighting(DocList docs, Query query, + SolrQueryRequest req, String[] defaultFields) throws IOException { + NamedList fragments = new SimpleOrderedMap(); + fragments.add("dummy", "thing1"); + return fragments; + } - @Override - public void initalize(SolrConfig config) { - // do nothing - } + @Override + public void initalize(SolrConfig config) { + // do nothing + } } Index: solr/core/src/java/org/apache/solr/core/Config.java =================================================================== --- solr/core/src/java/org/apache/solr/core/Config.java (revision 1381159) +++ solr/core/src/java/org/apache/solr/core/Config.java (working copy) @@ -141,8 +141,8 @@ SolrException.log(log, "Exception during parsing file: " + name,
e); throw e; } catch( SolrException e ){ - SolrException.log(log,"Error in "+name,e); - throw e; + SolrException.log(log,"Error in "+name,e); + throw e; } } Index: solr/core/src/java/org/apache/solr/core/CoreContainer.java =================================================================== --- solr/core/src/java/org/apache/solr/core/CoreContainer.java (revision 1381159) +++ solr/core/src/java/org/apache/solr/core/CoreContainer.java (working copy) @@ -543,7 +543,7 @@ } opt = DOMUtil.getAttr(node, CORE_ROLES, null); if(opt != null){ - p.getCloudDescriptor().setRoles(opt); + p.getCloudDescriptor().setRoles(opt); } } opt = DOMUtil.getAttr(node, CORE_PROPERTIES, null); Index: solr/core/src/java/org/apache/solr/response/RawResponseWriter.java =================================================================== --- solr/core/src/java/org/apache/solr/response/RawResponseWriter.java (revision 1381159) +++ solr/core/src/java/org/apache/solr/response/RawResponseWriter.java (working copy) @@ -96,7 +96,7 @@ } public void write(OutputStream out, SolrQueryRequest request, - SolrQueryResponse response) throws IOException { + SolrQueryResponse response) throws IOException { Object obj = response.getValues().get( CONTENT ); if( obj != null && (obj instanceof ContentStream ) ) { // copy the contents to the writer... @@ -110,8 +110,8 @@ } else { //getBaseWriter( request ).write( writer, request, response ); - throw new IOException("did not find a CONTENT object"); + throw new IOException("did not find a CONTENT object"); } - + + } } -} Index: solr/core/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java =================================================================== --- solr/core/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java (revision 1381159) +++ solr/core/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java (working copy) @@ -173,10 +173,10 @@ @Override public void writeMapOpener(int size) throws IOException, IllegalArgumentException { - // negative size value indicates that something has gone wrong - if (size < 0) { - throw new IllegalArgumentException("Map size must not be negative"); - } + // negative size value indicates that something has gone wrong + if (size < 0) { + throw new IllegalArgumentException("Map size must not be negative"); + } writer.write("a:"+size+":{"); } @@ -192,10 +192,10 @@ @Override public void writeArrayOpener(int size) throws IOException, IllegalArgumentException { - // negative size value indicates that something has gone wrong - if (size < 0) { - throw new IllegalArgumentException("Array size must not be negative"); - } + // negative size value indicates that something has gone wrong + if (size < 0) { + throw new IllegalArgumentException("Array size must not be negative"); + } writer.write("a:"+size+":{"); } Index: solr/core/src/java/org/apache/solr/response/SolrQueryResponse.java =================================================================== --- solr/core/src/java/org/apache/solr/response/SolrQueryResponse.java (revision 1381159) +++ solr/core/src/java/org/apache/solr/response/SolrQueryResponse.java (working copy) @@ -194,8 +194,8 @@ /** Repsonse header to be logged */ public NamedList getResponseHeader() { @SuppressWarnings("unchecked") - SimpleOrderedMap header = (SimpleOrderedMap) values.get("responseHeader"); - return header; + SimpleOrderedMap header = (SimpleOrderedMap) values.get("responseHeader"); + return header; } /** Add a value to be logged. 
@@ -204,7 +204,7 @@ * @param val value of the thing to log */ public void addToLog(String name, Object val) { - toLog.add(name, val); + toLog.add(name, val); } /** Get loggable items. @@ -212,7 +212,7 @@ * @return things to log */ public NamedList getToLog() { - return toLog; + return toLog; } /** Index: solr/core/src/java/org/apache/solr/schema/SchemaField.java =================================================================== --- solr/core/src/java/org/apache/solr/schema/SchemaField.java (revision 1381159) +++ solr/core/src/java/org/apache/solr/schema/SchemaField.java (working copy) @@ -182,7 +182,7 @@ String defaultValue = null; if( props.containsKey( "default" ) ) { - defaultValue = props.get( "default" ); + defaultValue = props.get( "default" ); } return new SchemaField(name, ft, calcProps(name, ft, props), defaultValue ); } Index: solr/core/src/java/org/apache/solr/search/QParser.java =================================================================== --- solr/core/src/java/org/apache/solr/search/QParser.java (revision 1381159) +++ solr/core/src/java/org/apache/solr/search/QParser.java (working copy) @@ -224,18 +224,18 @@ String pageScoreS = null; String pageDocS = null; - pageScoreS = params.get(CommonParams.PAGESCORE); - pageDocS = params.get(CommonParams.PAGEDOC); - - if (pageScoreS == null || pageDocS == null) - return null; - - int pageDoc = pageDocS != null ? Integer.parseInt(pageDocS) : -1; - float pageScore = pageScoreS != null ? new Float(pageScoreS) : -1; - if(pageDoc != -1 && pageScore != -1){ + pageScoreS = params.get(CommonParams.PAGESCORE); + pageDocS = params.get(CommonParams.PAGEDOC); + + if (pageScoreS == null || pageDocS == null) + return null; + + int pageDoc = pageDocS != null ? Integer.parseInt(pageDocS) : -1; + float pageScore = pageScoreS != null ? new Float(pageScoreS) : -1; + if(pageDoc != -1 && pageScore != -1){ return new ScoreDoc(pageDoc, pageScore); } - else { + else { return null; } Index: solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java =================================================================== --- solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java (revision 1381159) +++ solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java (working copy) @@ -1370,7 +1370,7 @@ TopDocsCollector topCollector; if (cmd.getSort() == null) { if(cmd.getScoreDoc() != null) { - topCollector = TopScoreDocCollector.create(len, cmd.getScoreDoc(), true); //create the Collector with InOrderPagingCollector + topCollector = TopScoreDocCollector.create(len, cmd.getScoreDoc(), true); //create the Collector with InOrderPagingCollector } else { topCollector = TopScoreDocCollector.create(len, true); } @@ -2030,11 +2030,11 @@ public ScoreDoc getScoreDoc() { - return scoreDoc; + return scoreDoc; } public void setScoreDoc(ScoreDoc scoreDoc) { - this.scoreDoc = scoreDoc; + this.scoreDoc = scoreDoc; } //Issue 1726 end Index: solr/core/src/java/org/apache/solr/cloud/CloudDescriptor.java =================================================================== --- solr/core/src/java/org/apache/solr/cloud/CloudDescriptor.java (revision 1381159) +++ solr/core/src/java/org/apache/solr/cloud/CloudDescriptor.java (working copy) @@ -49,11 +49,11 @@ } public String getRoles(){ - return roles; + return roles; } public void setRoles(String roles){ - this.roles = roles; + this.roles = roles; } /** Optional parameters that can change how a core is created. 
*/ Index: solr/core/src/java/org/apache/solr/spelling/DirectSolrSpellChecker.java =================================================================== --- solr/core/src/java/org/apache/solr/spelling/DirectSolrSpellChecker.java (revision 1381159) +++ solr/core/src/java/org/apache/solr/spelling/DirectSolrSpellChecker.java (working copy) @@ -214,7 +214,7 @@ result.add(token, empty); } else { for (SuggestWord suggestion : suggestions) { - result.add(token, suggestion.string, suggestion.freq); + result.add(token, suggestion.string, suggestion.freq); } } } Index: solr/core/src/java/org/apache/solr/spelling/ConjunctionSolrSpellChecker.java =================================================================== --- solr/core/src/java/org/apache/solr/spelling/ConjunctionSolrSpellChecker.java (revision 1381159) +++ solr/core/src/java/org/apache/solr/spelling/ConjunctionSolrSpellChecker.java (working copy) @@ -139,7 +139,7 @@ Map combinedTokenFrequency = new HashMap(); Map>> allSuggestions = new LinkedHashMap>>(); for(SpellingResult result : results) { - if(result.getTokenFrequency()!=null) { + if(result.getTokenFrequency()!=null) { combinedTokenFrequency.putAll(result.getTokenFrequency()); } for(Map.Entry> entry : result.getSuggestions().entrySet()) { Index: solr/core/src/java/org/apache/solr/spelling/AbstractLuceneSpellChecker.java =================================================================== --- solr/core/src/java/org/apache/solr/spelling/AbstractLuceneSpellChecker.java (revision 1381159) +++ solr/core/src/java/org/apache/solr/spelling/AbstractLuceneSpellChecker.java (working copy) @@ -142,7 +142,7 @@ @Override public SpellingResult getSuggestions(SpellingOptions options) throws IOException { - SpellingResult result = new SpellingResult(options.tokens); + SpellingResult result = new SpellingResult(options.tokens); IndexReader reader = determineReader(options.reader); Term term = field != null ? new Term(field, "") : null; float theAccuracy = (options.accuracy == Float.MIN_VALUE) ? 
spellChecker.getAccuracy() : options.accuracy; @@ -187,13 +187,13 @@ int countLimit = Math.min(options.count, suggestions.length); if(countLimit>0) { - for (int i = 0; i < countLimit; i++) { - term = new Term(field, suggestions[i]); - result.add(token, suggestions[i], reader.docFreq(term)); - } + for (int i = 0; i < countLimit; i++) { + term = new Term(field, suggestions[i]); + result.add(token, suggestions[i], reader.docFreq(term)); + } } else { - List suggList = Collections.emptyList(); - result.add(token, suggList); + List suggList = Collections.emptyList(); + result.add(token, suggList); } } else { if (suggestions.length > 0) { @@ -203,8 +203,8 @@ } result.add(token, suggList); } else { - List suggList = Collections.emptyList(); - result.add(token, suggList); + List suggList = Collections.emptyList(); + result.add(token, suggList); } } } Index: solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java =================================================================== --- solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java (revision 1381159) +++ solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java (working copy) @@ -191,42 +191,42 @@ } @SuppressWarnings("unchecked") - protected void addCollationsToResponse(SolrParams params, SpellingResult spellingResult, ResponseBuilder rb, String q, - NamedList response, boolean suggestionsMayOverlap) { - int maxCollations = params.getInt(SPELLCHECK_MAX_COLLATIONS, 1); - int maxCollationTries = params.getInt(SPELLCHECK_MAX_COLLATION_TRIES, 0); - int maxCollationEvaluations = params.getInt(SPELLCHECK_MAX_COLLATION_EVALUATIONS, 10000); - boolean collationExtendedResults = params.getBool(SPELLCHECK_COLLATE_EXTENDED_RESULTS, false); - boolean shard = params.getBool(ShardParams.IS_SHARD, false); + protected void addCollationsToResponse(SolrParams params, SpellingResult spellingResult, ResponseBuilder rb, String q, + NamedList response, boolean suggestionsMayOverlap) { + int maxCollations = params.getInt(SPELLCHECK_MAX_COLLATIONS, 1); + int maxCollationTries = params.getInt(SPELLCHECK_MAX_COLLATION_TRIES, 0); + int maxCollationEvaluations = params.getInt(SPELLCHECK_MAX_COLLATION_EVALUATIONS, 10000); + boolean collationExtendedResults = params.getBool(SPELLCHECK_COLLATE_EXTENDED_RESULTS, false); + boolean shard = params.getBool(ShardParams.IS_SHARD, false); - SpellCheckCollator collator = new SpellCheckCollator(); - List collations = collator.collate(spellingResult, q, rb, maxCollations, maxCollationTries, maxCollationEvaluations, suggestionsMayOverlap); + SpellCheckCollator collator = new SpellCheckCollator(); + List collations = collator.collate(spellingResult, q, rb, maxCollations, maxCollationTries, maxCollationEvaluations, suggestionsMayOverlap); //by sorting here we guarantee a non-distributed request returns all - //results in the same order as a distributed request would, - //even in cases when the internal rank is the same. 
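The Collections.sort just below is what the preceding comment is defending: without a deterministic order, two collations with equal internal rank would come back in whichever order the responding shard happened to produce them. A sketch of the idea with an assumed compareTo (Solr's SpellCheckCollation defines its own comparison, which may differ):

    // Ties on rank fall through to a stable, content-based comparison, so the
    // final order no longer depends on which shard answered first.
    final class CollationOrderSketch implements Comparable<CollationOrderSketch> {
      final int internalRank;
      final String collationQuery;
      CollationOrderSketch(int internalRank, String collationQuery) {
        this.internalRank = internalRank;
        this.collationQuery = collationQuery;
      }
      @Override
      public int compareTo(CollationOrderSketch other) {
        int byRank = Integer.compare(internalRank, other.internalRank);
        return byRank != 0 ? byRank : collationQuery.compareTo(other.collationQuery);
      }
    }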
- Collections.sort(collations); - - for (SpellCheckCollation collation : collations) { - if (collationExtendedResults) { - NamedList extendedResult = new NamedList(); - extendedResult.add("collationQuery", collation.getCollationQuery()); - extendedResult.add("hits", collation.getHits()); - extendedResult.add("misspellingsAndCorrections", collation.getMisspellingsAndCorrections()); - if(maxCollationTries>0 && shard) - { - extendedResult.add("collationInternalRank", collation.getInternalRank()); - } - response.add("collation", extendedResult); - } else { - response.add("collation", collation.getCollationQuery()); - if(maxCollationTries>0 && shard) - { - response.add("collationInternalRank", collation.getInternalRank()); - } - } - } - } + //results in the same order as a distributed request would, + //even in cases when the internal rank is the same. + Collections.sort(collations); + for (SpellCheckCollation collation : collations) { + if (collationExtendedResults) { + NamedList extendedResult = new NamedList(); + extendedResult.add("collationQuery", collation.getCollationQuery()); + extendedResult.add("hits", collation.getHits()); + extendedResult.add("misspellingsAndCorrections", collation.getMisspellingsAndCorrections()); + if(maxCollationTries>0 && shard) + { + extendedResult.add("collationInternalRank", collation.getInternalRank()); + } + response.add("collation", extendedResult); + } else { + response.add("collation", collation.getCollationQuery()); + if(maxCollationTries>0 && shard) + { + response.add("collationInternalRank", collation.getInternalRank()); + } + } + } + } + /** * For every param that is of the form "spellcheck.[dictionary name].XXXX=YYYY, add * XXXX=YYYY as a param to the custom param list @@ -297,8 +297,8 @@ NamedList nl = (NamedList) srsp.getSolrResponse().getResponse().get("spellcheck"); LOG.info(srsp.getShard() + " " + nl); if (nl != null) { - mergeData.totalNumberShardResponses++; - collectShardSuggestions(nl, mergeData); + mergeData.totalNumberShardResponses++; + collectShardSuggestions(nl, mergeData); collectShardCollations(mergeData, nl, maxCollationTries); } } @@ -317,22 +317,22 @@ SpellCheckCollation[] sortedCollations = mergeData.collations.values() .toArray(new SpellCheckCollation[mergeData.collations.size()]); Arrays.sort(sortedCollations); - int i = 0; - while (i < maxCollations && i < sortedCollations.length) { - SpellCheckCollation collation = sortedCollations[i]; - i++; - if (collationExtendedResults) { - NamedList extendedResult = new NamedList(); - extendedResult.add("collationQuery", collation.getCollationQuery()); - extendedResult.add("hits", collation.getHits()); - extendedResult.add("misspellingsAndCorrections", collation - .getMisspellingsAndCorrections()); - suggestions.add("collation", extendedResult); - } else { - suggestions.add("collation", collation.getCollationQuery()); - } - } - } + int i = 0; + while (i < maxCollations && i < sortedCollations.length) { + SpellCheckCollation collation = sortedCollations[i]; + i++; + if (collationExtendedResults) { + NamedList extendedResult = new NamedList(); + extendedResult.add("collationQuery", collation.getCollationQuery()); + extendedResult.add("hits", collation.getHits()); + extendedResult.add("misspellingsAndCorrections", collation + .getMisspellingsAndCorrections()); + suggestions.add("collation", extendedResult); + } else { + suggestions.add("collation", collation.getCollationQuery()); + } + } + } response.add("suggestions", suggestions); rb.rsp.add("spellcheck", response); Index: 
solr/core/src/java/org/apache/solr/handler/component/ResponseBuilder.java =================================================================== --- solr/core/src/java/org/apache/solr/handler/component/ResponseBuilder.java (revision 1381159) +++ solr/core/src/java/org/apache/solr/handler/component/ResponseBuilder.java (working copy) @@ -409,11 +409,11 @@ public ScoreDoc getScoreDoc() { - return scoreDoc; + return scoreDoc; } public void setScoreDoc(ScoreDoc scoreDoc) { - this.scoreDoc = scoreDoc; + this.scoreDoc = scoreDoc; } } Index: solr/core/src/java/org/apache/solr/handler/loader/CSVLoaderBase.java =================================================================== --- solr/core/src/java/org/apache/solr/handler/loader/CSVLoaderBase.java (revision 1381159) +++ solr/core/src/java/org/apache/solr/handler/loader/CSVLoaderBase.java (working copy) @@ -388,9 +388,9 @@ // add any literals for (SchemaField sf : literals.keySet()) { - String fn = sf.getName(); - String val = literals.get(sf); - doc.addField(fn, val); + String fn = sf.getName(); + String val = literals.get(sf); + doc.addField(fn, val); } template.solrDoc = doc; Index: solr/core/src/java/org/apache/solr/util/LongPriorityQueue.java =================================================================== --- solr/core/src/java/org/apache/solr/util/LongPriorityQueue.java (revision 1381159) +++ solr/core/src/java/org/apache/solr/util/LongPriorityQueue.java (working copy) @@ -152,10 +152,10 @@ time. Only valid if size() > 0. */ public long pop() { - long result = heap[1]; // save first value - heap[1] = heap[size]; // move last to first + long result = heap[1]; // save first value + heap[1] = heap[size]; // move last to first size--; - downHeap(); // adjust heap + downHeap(); // adjust heap return result; } @@ -187,11 +187,11 @@ */ public long[] sort(int n) { while (--n >= 0) { - long result = heap[1]; // save first value - heap[1] = heap[size]; // move last to first + long result = heap[1]; // save first value + heap[1] = heap[size]; // move last to first heap[size] = result; // place it last size--; - downHeap(); // adjust heap + downHeap(); // adjust heap } return heap; } @@ -203,26 +203,26 @@ private void upHeap() { int i = size; - long node = heap[i]; // save bottom node + long node = heap[i]; // save bottom node int j = i >>> 1; while (j > 0 && node < heap[j]) { - heap[i] = heap[j]; // shift parents down + heap[i] = heap[j]; // shift parents down i = j; j = j >>> 1; } - heap[i] = node; // install saved node + heap[i] = node; // install saved node } private void downHeap() { int i = 1; - long node = heap[i]; // save top node - int j = i << 1; // find smaller child + long node = heap[i]; // save top node + int j = i << 1; // find smaller child int k = j + 1; if (k <= size && heap[k] < heap[j]) { j = k; } while (j <= size && heap[j] < node) { - heap[i] = heap[j]; // shift up child + heap[i] = heap[j]; // shift up child i = j; j = i << 1; k = j + 1; @@ -230,6 +230,6 @@ j = k; } } - heap[i] = node; // install saved node + heap[i] = node; // install saved node } } Index: solr/solrj/src/test/org/apache/solr/client/solrj/response/TestSpellCheckResponse.java =================================================================== --- solr/solrj/src/test/org/apache/solr/client/solrj/response/TestSpellCheckResponse.java (revision 1381159) +++ solr/solrj/src/test/org/apache/solr/client/solrj/response/TestSpellCheckResponse.java (working copy) @@ -108,7 +108,7 @@ @Test public void testSpellCheckCollationResponse() throws Exception { - getSolrServer(); + 
getSolrServer(); server.deleteByQuery("*:*"); server.commit(true, true); SolrInputDocument doc = new SolrInputDocument(); @@ -156,27 +156,27 @@ assertEquals(2, collations.size()); for(Collation collation : collations) { - assertTrue("name:(+faith +hope +love)".equals(collation.getCollationQueryString()) || "name:(+faith +hope +loaves)".equals(collation.getCollationQueryString())); + assertTrue("name:(+faith +hope +love)".equals(collation.getCollationQueryString()) || "name:(+faith +hope +loaves)".equals(collation.getCollationQueryString())); assertTrue(collation.getNumberOfHits()==1); - - List misspellingsAndCorrections = collation.getMisspellingsAndCorrections(); - assertTrue(misspellingsAndCorrections.size()==3); - for(Correction correction : misspellingsAndCorrections) - { - if("fauth".equals(correction.getOriginal())) - { - assertTrue("faith".equals(correction.getCorrection())); - } else if("home".equals(correction.getOriginal())) - { - assertTrue("hope".equals(correction.getCorrection())); - } else if("loane".equals(correction.getOriginal())) - { - assertTrue("love".equals(correction.getCorrection()) || "loaves".equals(correction.getCorrection())); - } else - { - fail("Original Word Should have been either fauth, home or loane."); - } - } + + List misspellingsAndCorrections = collation.getMisspellingsAndCorrections(); + assertTrue(misspellingsAndCorrections.size()==3); + for(Correction correction : misspellingsAndCorrections) + { + if("fauth".equals(correction.getOriginal())) + { + assertTrue("faith".equals(correction.getCorrection())); + } else if("home".equals(correction.getOriginal())) + { + assertTrue("hope".equals(correction.getCorrection())); + } else if("loane".equals(correction.getOriginal())) + { + assertTrue("love".equals(correction.getCorrection()) || "loaves".equals(correction.getCorrection())); + } else + { + fail("Original Word Should have been either fauth, home or loane."); + } + } } query.set(SpellingParams.SPELLCHECK_COLLATE_EXTENDED_RESULTS, false); Index: solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java =================================================================== --- solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java (revision 1381159) +++ solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java (working copy) @@ -141,7 +141,7 @@ public void command() { try { - ZkStateReader.this.createClusterStateWatchersAndUpdate(); + ZkStateReader.this.createClusterStateWatchersAndUpdate(); } catch (KeeperException e) { log.error("", e); throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, @@ -378,12 +378,12 @@ } abstract class RunnableWatcher implements Runnable { - Watcher watcher; - public RunnableWatcher(Watcher watcher){ - this.watcher = watcher; - } + Watcher watcher; + public RunnableWatcher(Watcher watcher){ + this.watcher = watcher; + } - } + } public String getLeaderUrl(String collection, String shard, int timeout) throws InterruptedException, KeeperException { Index: solr/solrj/src/java/org/apache/solr/common/util/NamedList.java =================================================================== --- solr/solrj/src/java/org/apache/solr/common/util/NamedList.java (revision 1381159) +++ solr/solrj/src/java/org/apache/solr/common/util/NamedList.java (working copy) @@ -367,12 +367,12 @@ @Override public String toString() { - return getKey()+"="+getValue(); + return getKey()+"="+getValue(); } - public T setValue(T value) { + public T setValue(T value) { return list.setVal(index, value); - } + } }; return 
nv; } Index: solr/solrj/src/java/org/apache/solr/client/solrj/beans/DocumentObjectBinder.java =================================================================== --- solr/solrj/src/java/org/apache/solr/client/solrj/beans/DocumentObjectBinder.java (revision 1381159) +++ solr/solrj/src/java/org/apache/solr/client/solrj/beans/DocumentObjectBinder.java (working copy) @@ -88,7 +88,7 @@ } else { doc.setField(field.name, field.get(obj), 1.0f); } - } + } return doc; } Index: solr/solrj/src/java/org/apache/solr/client/solrj/request/LukeRequest.java =================================================================== --- solr/solrj/src/java/org/apache/solr/client/solrj/request/LukeRequest.java (revision 1381159) +++ solr/solrj/src/java/org/apache/solr/client/solrj/request/LukeRequest.java (working copy) @@ -108,7 +108,7 @@ params.add( "numTerms", numTerms+"" ); } if (showSchema) { - params.add("show", "schema"); + params.add("show", "schema"); } return params; } Index: solr/solrj/src/java/org/apache/solr/client/solrj/response/SpellCheckResponse.java =================================================================== --- solr/solrj/src/java/org/apache/solr/client/solrj/response/SpellCheckResponse.java (revision 1381159) +++ solr/solrj/src/java/org/apache/solr/client/solrj/response/SpellCheckResponse.java (working copy) @@ -45,41 +45,41 @@ String n = sugg.getName(i); if ("correctlySpelled".equals(n)) { correctlySpelled = (Boolean) sugg.getVal(i); - } else if ("collationInternalRank".equals(n)){ - //continue; - } else if ("collation".equals(n)) { - List collationInfo = sugg.getAll(n); - collations = new ArrayList(collationInfo.size()); - for (Object o : collationInfo) { - if (o instanceof String) { - collations.add(new Collation() - .setCollationQueryString((String) o)); - } else if (o instanceof NamedList) { + } else if ("collationInternalRank".equals(n)){ + //continue; + } else if ("collation".equals(n)) { + List collationInfo = sugg.getAll(n); + collations = new ArrayList(collationInfo.size()); + for (Object o : collationInfo) { + if (o instanceof String) { + collations.add(new Collation() + .setCollationQueryString((String) o)); + } else if (o instanceof NamedList) { @SuppressWarnings("unchecked") - NamedList expandedCollation = (NamedList) o; - String collationQuery + NamedList expandedCollation = (NamedList) o; + String collationQuery = (String) expandedCollation.get("collationQuery"); - int hits = (Integer) expandedCollation.get("hits"); + int hits = (Integer) expandedCollation.get("hits"); @SuppressWarnings("unchecked") - NamedList misspellingsAndCorrections + NamedList misspellingsAndCorrections = (NamedList) expandedCollation.get("misspellingsAndCorrections"); - Collation collation = new Collation(); - collation.setCollationQueryString(collationQuery); - collation.setNumberOfHits(hits); + Collation collation = new Collation(); + collation.setCollationQueryString(collationQuery); + collation.setNumberOfHits(hits); - for (int ii = 0; ii < misspellingsAndCorrections.size(); ii++) { - String misspelling = misspellingsAndCorrections.getName(ii); - String correction = misspellingsAndCorrections.getVal(ii); - collation.addMisspellingsAndCorrection(new Correction( - misspelling, correction)); - } - collations.add(collation); - } else { - throw new AssertionError( - "Should get Lists of Strings or List of NamedLists here."); - } - } + for (int ii = 0; ii < misspellingsAndCorrections.size(); ii++) { + String misspelling = misspellingsAndCorrections.getName(ii); + String correction = 
misspellingsAndCorrections.getVal(ii); + collation.addMisspellingsAndCorrection(new Correction( + misspelling, correction)); + } + collations.add(collation); + } else { + throw new AssertionError( + "Should get Lists of Strings or List of NamedLists here."); + } + } } else { @SuppressWarnings("unchecked") Suggestion s = new Suggestion(n, (NamedList) sugg.getVal(i)); @@ -129,7 +129,7 @@ * @return all collations */ public List getCollatedResults() { - return collations; + return collations; } public static class Suggestion { @@ -219,62 +219,62 @@ } - public class Collation { - private String collationQueryString; - private List misspellingsAndCorrections = new ArrayList(); - private long numberOfHits; + public class Collation { + private String collationQueryString; + private List misspellingsAndCorrections = new ArrayList(); + private long numberOfHits; - public long getNumberOfHits() { - return numberOfHits; - } + public long getNumberOfHits() { + return numberOfHits; + } - public void setNumberOfHits(long numberOfHits) { - this.numberOfHits = numberOfHits; - } + public void setNumberOfHits(long numberOfHits) { + this.numberOfHits = numberOfHits; + } - public String getCollationQueryString() { - return collationQueryString; - } + public String getCollationQueryString() { + return collationQueryString; + } - public Collation setCollationQueryString(String collationQueryString) { - this.collationQueryString = collationQueryString; - return this; - } + public Collation setCollationQueryString(String collationQueryString) { + this.collationQueryString = collationQueryString; + return this; + } - public List getMisspellingsAndCorrections() { - return misspellingsAndCorrections; - } + public List getMisspellingsAndCorrections() { + return misspellingsAndCorrections; + } - public Collation addMisspellingsAndCorrection(Correction correction) { - this.misspellingsAndCorrections.add(correction); - return this; - } + public Collation addMisspellingsAndCorrection(Correction correction) { + this.misspellingsAndCorrections.add(correction); + return this; + } - } + } - public class Correction { - private String original; - private String correction; + public class Correction { + private String original; + private String correction; - public Correction(String original, String correction) { - this.original = original; - this.correction = correction; - } + public Correction(String original, String correction) { + this.original = original; + this.correction = correction; + } - public String getOriginal() { - return original; - } + public String getOriginal() { + return original; + } - public void setOriginal(String original) { - this.original = original; - } + public void setOriginal(String original) { + this.original = original; + } - public String getCorrection() { - return correction; - } + public String getCorrection() { + return correction; + } - public void setCorrection(String correction) { - this.correction = correction; - } - } + public void setCorrection(String correction) { + this.correction = correction; + } + } } Index: solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/AbstractDIHCacheTestCase.java =================================================================== --- solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/AbstractDIHCacheTestCase.java (revision 1381159) +++ solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/AbstractDIHCacheTestCase.java (working copy) @@ -38,197 +38,197 @@ import org.junit.Assert; import 
org.junit.Before; -public class AbstractDIHCacheTestCase { - protected static final Date Feb21_2011 = new Date(1298268000000l); - protected final String[] fieldTypes = { "INTEGER", "BIGDECIMAL", "STRING", "STRING", "FLOAT", "DATE", "CLOB" }; - protected final String[] fieldNames = { "a_id", "PI", "letter", "examples", "a_float", "a_date", "DESCRIPTION" }; - protected List data = new ArrayList(); - protected Clob APPLE = null; - - @Before - public void setup() { - try { - APPLE = new SerialClob(new String("Apples grow on trees and they are good to eat.").toCharArray()); - } catch (SQLException sqe) { - Assert.fail("Could not Set up Test"); - } +public class AbstractDIHCacheTestCase { + protected static final Date Feb21_2011 = new Date(1298268000000l); + protected final String[] fieldTypes = { "INTEGER", "BIGDECIMAL", "STRING", "STRING", "FLOAT", "DATE", "CLOB" }; + protected final String[] fieldNames = { "a_id", "PI", "letter", "examples", "a_float", "a_date", "DESCRIPTION" }; + protected List data = new ArrayList(); + protected Clob APPLE = null; - // The first row needs to have all non-null fields, - // otherwise we would have to always send the fieldTypes & fieldNames as CacheProperties when building. - data = new ArrayList(); - data.add(new ControlData(new Object[] { new Integer(1), new BigDecimal(Math.PI), "A", "Apple", new Float(1.11), Feb21_2011, APPLE })); - data.add(new ControlData(new Object[] { new Integer(2), new BigDecimal(Math.PI), "B", "Ball", new Float(2.22), Feb21_2011, null })); - data.add(new ControlData(new Object[] { new Integer(4), new BigDecimal(Math.PI), "D", "Dog", new Float(4.44), Feb21_2011, null })); - data.add(new ControlData(new Object[] { new Integer(3), new BigDecimal(Math.PI), "C", "Cookie", new Float(3.33), Feb21_2011, null })); - data.add(new ControlData(new Object[] { new Integer(4), new BigDecimal(Math.PI), "D", "Daisy", new Float(4.44), Feb21_2011, null })); - data.add(new ControlData(new Object[] { new Integer(4), new BigDecimal(Math.PI), "D", "Drawing", new Float(4.44), Feb21_2011, null })); - data.add(new ControlData(new Object[] { new Integer(5), new BigDecimal(Math.PI), "E", - Arrays.asList("Eggplant", "Ear", "Elephant", "Engine"), new Float(5.55), Feb21_2011, null })); - } + @Before + public void setup() { + try { + APPLE = new SerialClob(new String("Apples grow on trees and they are good to eat.").toCharArray()); + } catch (SQLException sqe) { + Assert.fail("Could not Set up Test"); + } - @After - public void teardown() { - APPLE = null; - data = null; - } - - //A limitation of this test class is that the primary key needs to be the first one in the list. - //DIHCaches, however, can handle any field being the primary key. - class ControlData implements Comparable, Iterable { - Object[] data; + // The first row needs to have all non-null fields, + // otherwise we would have to always send the fieldTypes & fieldNames as CacheProperties when building. 
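The "first row must be all non-null" constraint in the comment above exists because a cache that infers its column metadata can only learn a field's type from a value it actually sees; a null leaves that column untyped, which is why the fallback is shipping fieldTypes and fieldNames as cache properties. A hedged sketch of such inference (hypothetical helper, not the DIH cache code):

    import java.util.LinkedHashMap;
    import java.util.Map;

    final class TypeInferenceSketch {
      // Derive per-field types from the first record; a null defeats inference.
      static Map<String, Class<?>> inferTypes(Map<String, Object> firstRow) {
        Map<String, Class<?>> types = new LinkedHashMap<String, Class<?>>();
        for (Map.Entry<String, Object> e : firstRow.entrySet()) {
          if (e.getValue() == null) {
            throw new IllegalStateException("no value to infer a type for '"
                + e.getKey() + "'; supply fieldTypes/fieldNames instead");
          }
          types.put(e.getKey(), e.getValue().getClass());
        }
        return types;
      }
    }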
+ data = new ArrayList(); + data.add(new ControlData(new Object[] { new Integer(1), new BigDecimal(Math.PI), "A", "Apple", new Float(1.11), Feb21_2011, APPLE })); + data.add(new ControlData(new Object[] { new Integer(2), new BigDecimal(Math.PI), "B", "Ball", new Float(2.22), Feb21_2011, null })); + data.add(new ControlData(new Object[] { new Integer(4), new BigDecimal(Math.PI), "D", "Dog", new Float(4.44), Feb21_2011, null })); + data.add(new ControlData(new Object[] { new Integer(3), new BigDecimal(Math.PI), "C", "Cookie", new Float(3.33), Feb21_2011, null })); + data.add(new ControlData(new Object[] { new Integer(4), new BigDecimal(Math.PI), "D", "Daisy", new Float(4.44), Feb21_2011, null })); + data.add(new ControlData(new Object[] { new Integer(4), new BigDecimal(Math.PI), "D", "Drawing", new Float(4.44), Feb21_2011, null })); + data.add(new ControlData(new Object[] { new Integer(5), new BigDecimal(Math.PI), "E", + Arrays.asList("Eggplant", "Ear", "Elephant", "Engine"), new Float(5.55), Feb21_2011, null })); + } - ControlData(Object[] data) { - this.data = data; - } + @After + public void teardown() { + APPLE = null; + data = null; + } - @SuppressWarnings("unchecked") - public int compareTo(ControlData cd) { - Comparable c1 = (Comparable) data[0]; - Comparable c2 = (Comparable) cd.data[0]; - return c1.compareTo(c2); - } + //A limitation of this test class is that the primary key needs to be the first one in the list. + //DIHCaches, however, can handle any field being the primary key. + class ControlData implements Comparable, Iterable { + Object[] data; - public Iterator iterator() { - return Arrays.asList(data).iterator(); - } - } - - protected void loadData(DIHCache cache, List theData, String[] theFieldNames, boolean keepOrdered) { - for (ControlData cd : theData) { - cache.add(controlDataToMap(cd, theFieldNames, keepOrdered)); - } - } + ControlData(Object[] data) { + this.data = data; + } - protected List extractDataInKeyOrder(DIHCache cache, String[] theFieldNames) { - List data = new ArrayList(); - Iterator> cacheIter = cache.iterator(); - while (cacheIter.hasNext()) { - data.add(mapToObjectArray(cacheIter.next(), theFieldNames)); - } - return listToControlData(data); - } + @SuppressWarnings("unchecked") + public int compareTo(ControlData cd) { + Comparable c1 = (Comparable) data[0]; + Comparable c2 = (Comparable) cd.data[0]; + return c1.compareTo(c2); + } - //This method assumes that the Primary Keys are integers and that the first id=1. - //It will look for id's sequentially until one is skipped, then will stop. 
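That sequential contract has a quiet consequence worth spelling out: a gap in the key sequence hides every record after it. A self-contained illustration against a plain Map rather than the DIHCache API:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import java.util.TreeMap;

    final class GapScanSketch {
      public static void main(String[] args) {
        Map<Integer, String> recs = new TreeMap<Integer, String>();
        recs.put(1, "A"); recs.put(2, "B"); recs.put(3, "C"); recs.put(5, "E");
        List<String> found = new ArrayList<String>();
        for (int id = 1; recs.containsKey(id); id++) {
          found.add(recs.get(id)); // visits ids 1, 2, 3
        }
        System.out.println(found); // [A, B, C]: id 5 is unreachable past the gap at 4
      }
    }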
- protected List extractDataByKeyLookup(DIHCache cache, String[] theFieldNames) { - int recId = 1; - List data = new ArrayList(); - while (true) { - Iterator> listORecs = cache.iterator(recId); - if (listORecs == null) { - break; - } + public Iterator iterator() { + return Arrays.asList(data).iterator(); + } + } - while(listORecs.hasNext()) { - data.add(mapToObjectArray(listORecs.next(), theFieldNames)); - } - recId++; - } - return listToControlData(data); - } + protected void loadData(DIHCache cache, List theData, String[] theFieldNames, boolean keepOrdered) { + for (ControlData cd : theData) { + cache.add(controlDataToMap(cd, theFieldNames, keepOrdered)); + } + } - protected List listToControlData(List data) { - List returnData = new ArrayList(data.size()); - for (int i = 0; i < data.size(); i++) { - returnData.add(new ControlData(data.get(i))); - } - return returnData; - } + protected List extractDataInKeyOrder(DIHCache cache, String[] theFieldNames) { + List data = new ArrayList(); + Iterator> cacheIter = cache.iterator(); + while (cacheIter.hasNext()) { + data.add(mapToObjectArray(cacheIter.next(), theFieldNames)); + } + return listToControlData(data); + } - protected Object[] mapToObjectArray(Map rec, String[] theFieldNames) { - Object[] oos = new Object[theFieldNames.length]; - for (int i = 0; i < theFieldNames.length; i++) { - oos[i] = rec.get(theFieldNames[i]); - } - return oos; - } + //This method assumes that the Primary Keys are integers and that the first id=1. + //It will look for id's sequentially until one is skipped, then will stop. + protected List extractDataByKeyLookup(DIHCache cache, String[] theFieldNames) { + int recId = 1; + List data = new ArrayList(); + while (true) { + Iterator> listORecs = cache.iterator(recId); + if (listORecs == null) { + break; + } - protected void compareData(List theControl, List test) { - // The test data should come back primarily in Key order and secondarily in insertion order. - List control = new ArrayList(theControl); - Collections.sort(control); + while(listORecs.hasNext()) { + data.add(mapToObjectArray(listORecs.next(), theFieldNames)); + } + recId++; + } + return listToControlData(data); + } - StringBuilder errors = new StringBuilder(); - if (test.size() != control.size()) { - errors.append("-Returned data has " + test.size() + " records. expected: " + control.size() + "\n"); - } - for (int i = 0; i < control.size() && i < test.size(); i++) { - Object[] controlRec = control.get(i).data; - Object[] testRec = test.get(i).data; - if (testRec.length != controlRec.length) { - errors.append("-Record indexAt=" + i + " has " + testRec.length + " data elements. extpected: " + controlRec.length + "\n"); - } - for (int j = 0; j < controlRec.length && j < testRec.length; j++) { - Object controlObj = controlRec[j]; - Object testObj = testRec[j]; - if (controlObj == null && testObj != null) { - errors.append("-Record indexAt=" + i + ", Data Element indexAt=" + j + " is not NULL as expected.\n"); - } else if (controlObj != null && testObj == null) { - errors.append("-Record indexAt=" + i + ", Data Element indexAt=" + j + " is NULL. 
Expected: " + controlObj + " (class=" - + controlObj.getClass().getName() + ")\n"); - } else if (controlObj != null && testObj != null && controlObj instanceof Clob) { - String controlString = clobToString((Clob) controlObj); - String testString = clobToString((Clob) testObj); - if (!controlString.equals(testString)) { - errors.append("-Record indexAt=" + i + ", Data Element indexAt=" + j + " has: " + testString + " (class=Clob) ... Expected: " + controlString - + " (class=Clob)\n"); - } - } else if (controlObj != null && !controlObj.equals(testObj)) { - errors.append("-Record indexAt=" + i + ", Data Element indexAt=" + j + " has: " + testObj + " (class=" + testObj.getClass().getName() - + ") ... Expected: " + controlObj + " (class=" + controlObj.getClass().getName() + ")\n"); - } - } - } - if (errors.length() > 0) { - Assert.fail(errors.toString()); - } - } + protected List listToControlData(List data) { + List returnData = new ArrayList(data.size()); + for (int i = 0; i < data.size(); i++) { + returnData.add(new ControlData(data.get(i))); + } + return returnData; + } - protected Map controlDataToMap(ControlData cd, String[] theFieldNames, boolean keepOrdered) { - Map rec = null; - if (keepOrdered) { - rec = new LinkedHashMap(); - } else { - rec = new HashMap(); - } - for (int i = 0; i < cd.data.length; i++) { - String fieldName = theFieldNames[i]; - Object data = cd.data[i]; - rec.put(fieldName, data); - } - return rec; - } + protected Object[] mapToObjectArray(Map rec, String[] theFieldNames) { + Object[] oos = new Object[theFieldNames.length]; + for (int i = 0; i < theFieldNames.length; i++) { + oos[i] = rec.get(theFieldNames[i]); + } + return oos; + } - protected String stringArrayToCommaDelimitedList(String[] strs) { - StringBuilder sb = new StringBuilder(); - for (String a : strs) { - if (sb.length() > 0) { - sb.append(","); - } - sb.append(a); - } - return sb.toString(); - } + protected void compareData(List theControl, List test) { + // The test data should come back primarily in Key order and secondarily in insertion order. + List control = new ArrayList(theControl); + Collections.sort(control); - protected String clobToString(Clob cl) { - StringBuilder sb = new StringBuilder(); - try { - Reader in = cl.getCharacterStream(); - char[] cbuf = new char[1024]; - int numGot = -1; - while ((numGot = in.read(cbuf)) != -1) { - sb.append(String.valueOf(cbuf, 0, numGot)); - } - } catch (Exception e) { - Assert.fail(e.toString()); - } - return sb.toString(); - } - - public static Context getContext(final Map entityAttrs) { - VariableResolverImpl resolver = new VariableResolverImpl(); + StringBuilder errors = new StringBuilder(); + if (test.size() != control.size()) { + errors.append("-Returned data has " + test.size() + " records. expected: " + control.size() + "\n"); + } + for (int i = 0; i < control.size() && i < test.size(); i++) { + Object[] controlRec = control.get(i).data; + Object[] testRec = test.get(i).data; + if (testRec.length != controlRec.length) { + errors.append("-Record indexAt=" + i + " has " + testRec.length + " data elements. 
expected: " + controlRec.length + "\n"); + } + for (int j = 0; j < controlRec.length && j < testRec.length; j++) { + Object controlObj = controlRec[j]; + Object testObj = testRec[j]; + if (controlObj == null && testObj != null) { + errors.append("-Record indexAt=" + i + ", Data Element indexAt=" + j + " is not NULL as expected.\n"); + } else if (controlObj != null && testObj == null) { + errors.append("-Record indexAt=" + i + ", Data Element indexAt=" + j + " is NULL. Expected: " + controlObj + " (class=" + + controlObj.getClass().getName() + ")\n"); + } else if (controlObj != null && testObj != null && controlObj instanceof Clob) { + String controlString = clobToString((Clob) controlObj); + String testString = clobToString((Clob) testObj); + if (!controlString.equals(testString)) { + errors.append("-Record indexAt=" + i + ", Data Element indexAt=" + j + " has: " + testString + " (class=Clob) ... Expected: " + controlString + + " (class=Clob)\n"); + } + } else if (controlObj != null && !controlObj.equals(testObj)) { + errors.append("-Record indexAt=" + i + ", Data Element indexAt=" + j + " has: " + testObj + " (class=" + testObj.getClass().getName() + + ") ... Expected: " + controlObj + " (class=" + controlObj.getClass().getName() + ")\n"); + } + } + } + if (errors.length() > 0) { + Assert.fail(errors.toString()); + } + } + + protected Map controlDataToMap(ControlData cd, String[] theFieldNames, boolean keepOrdered) { + Map rec = null; + if (keepOrdered) { + rec = new LinkedHashMap(); + } else { + rec = new HashMap(); + } + for (int i = 0; i < cd.data.length; i++) { + String fieldName = theFieldNames[i]; + Object data = cd.data[i]; + rec.put(fieldName, data); + } + return rec; + } + + protected String stringArrayToCommaDelimitedList(String[] strs) { + StringBuilder sb = new StringBuilder(); + for (String a : strs) { + if (sb.length() > 0) { + sb.append(","); + } + sb.append(a); + } + return sb.toString(); + } + + protected String clobToString(Clob cl) { + StringBuilder sb = new StringBuilder(); + try { + Reader in = cl.getCharacterStream(); + char[] cbuf = new char[1024]; + int numGot = -1; + while ((numGot = in.read(cbuf)) != -1) { + sb.append(String.valueOf(cbuf, 0, numGot)); + } + } catch (Exception e) { + Assert.fail(e.toString()); + } + return sb.toString(); + } + + public static Context getContext(final Map entityAttrs) { + VariableResolverImpl resolver = new VariableResolverImpl(); final Context delegate = new ContextImpl(null, resolver, null, null, new HashMap(), null, null); return new TestContext(entityAttrs, delegate, null, true); } - + } Index: solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestSortedMapBackedCache.java =================================================================== --- solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestSortedMapBackedCache.java (revision 1381159) +++ solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestSortedMapBackedCache.java (working copy) @@ -17,143 +17,143 @@ * limitations under the License.
*/ +import org.junit.Assert; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import java.math.BigDecimal; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; -import org.junit.Assert; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - public class TestSortedMapBackedCache extends AbstractDIHCacheTestCase { - - public static Logger log = LoggerFactory.getLogger(TestSortedMapBackedCache.class); - - @Test - public void testCacheWithKeyLookup() { - DIHCache cache = null; - try { - cache = new SortedMapBackedCache(); - cache.open(getContext(new HashMap())); - loadData(cache, data, fieldNames, true); - List testData = extractDataByKeyLookup(cache, fieldNames); - compareData(data, testData); - } catch (Exception e) { - log.warn("Exception thrown: " + e.toString()); - Assert.fail(); - } finally { - try { - cache.destroy(); - } catch (Exception ex) { - } - } - } - @Test - public void testCacheWithOrderedLookup() { - DIHCache cache = null; - try { - cache = new SortedMapBackedCache(); - cache.open(getContext(new HashMap())); - loadData(cache, data, fieldNames, true); - List testData = extractDataInKeyOrder(cache, fieldNames); - compareData(data, testData); - } catch (Exception e) { - log.warn("Exception thrown: " + e.toString()); - Assert.fail(); - } finally { - try { - cache.destroy(); - } catch (Exception ex) { - } - } - } + public static Logger log = LoggerFactory.getLogger(TestSortedMapBackedCache.class); - @Test - public void testCacheReopensWithUpdate() { - DIHCache cache = null; - try { - Map cacheProps = new HashMap(); - cacheProps.put(DIHCacheSupport.CACHE_PRIMARY_KEY, "a_id"); - - cache = new SortedMapBackedCache(); - cache.open(getContext(cacheProps)); - // We can let the data hit the cache with the fields out of order because - // we've identified the pk up-front. - loadData(cache, data, fieldNames, false); + @Test + public void testCacheWithKeyLookup() { + DIHCache cache = null; + try { + cache = new SortedMapBackedCache(); + cache.open(getContext(new HashMap())); + loadData(cache, data, fieldNames, true); + List testData = extractDataByKeyLookup(cache, fieldNames); + compareData(data, testData); + } catch (Exception e) { + log.warn("Exception thrown: " + e.toString()); + Assert.fail(); + } finally { + try { + cache.destroy(); + } catch (Exception ex) { + } + } + } - // Close the cache. - cache.close(); + @Test + public void testCacheWithOrderedLookup() { + DIHCache cache = null; + try { + cache = new SortedMapBackedCache(); + cache.open(getContext(new HashMap())); + loadData(cache, data, fieldNames, true); + List testData = extractDataInKeyOrder(cache, fieldNames); + compareData(data, testData); + } catch (Exception e) { + log.warn("Exception thrown: " + e.toString()); + Assert.fail(); + } finally { + try { + cache.destroy(); + } catch (Exception ex) { + } + } + } - List newControlData = new ArrayList(); - Object[] newIdEqualsThree = null; - int j = 0; - for (int i = 0; i < data.size(); i++) { - // We'll be deleting a_id=1 so remove it from the control data. - if (data.get(i).data[0].equals(new Integer(1))) { - continue; - } + @Test + public void testCacheReopensWithUpdate() { + DIHCache cache = null; + try { + Map cacheProps = new HashMap(); + cacheProps.put(DIHCacheSupport.CACHE_PRIMARY_KEY, "a_id"); - // We'll be changing "Cookie" to "Carrot" in a_id=3 so change it in the control data. 
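The System.arraycopy in the lines that follow is deliberate: the row is cloned before the "Carrot" edit so the shared fixture list is not mutated through an alias. In miniature:

    final class CopyVsAliasSketch {
      public static void main(String[] args) {
        Object[] original = { Integer.valueOf(3), "C", "Cookie" };

        Object[] copy = new Object[original.length];
        System.arraycopy(original, 0, copy, 0, original.length);
        copy[2] = "Carrot";                  // edit the clone only
        System.out.println(original[2]);     // Cookie: fixture intact

        Object[] alias = original;           // no copy: two names, one array
        alias[2] = "Carrot";
        System.out.println(original[2]);     // Carrot: fixture corrupted
      }
    }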
- if (data.get(i).data[0].equals(new Integer(3))) { - newIdEqualsThree = new Object[data.get(i).data.length]; - System.arraycopy(data.get(i).data, 0, newIdEqualsThree, 0, newIdEqualsThree.length); - newIdEqualsThree[3] = "Carrot"; - newControlData.add(new ControlData(newIdEqualsThree)); - } - // Everything else can just be copied over. - else { - newControlData.add(data.get(i)); - } + cache = new SortedMapBackedCache(); + cache.open(getContext(cacheProps)); + // We can let the data hit the cache with the fields out of order because + // we've identified the pk up-front. + loadData(cache, data, fieldNames, false); - j++; - } + // Close the cache. + cache.close(); - // These new rows of data will get added to the cache, so add them to the control data too. - Object[] newDataRow1 = new Object[] { new Integer(99), new BigDecimal(Math.PI), "Z", "Zebra", new Float(99.99), Feb21_2011, null }; - Object[] newDataRow2 = new Object[] { new Integer(2), new BigDecimal(Math.PI), "B", "Ballerina", new Float(2.22), Feb21_2011, null }; + List newControlData = new ArrayList(); + Object[] newIdEqualsThree = null; + int j = 0; + for (int i = 0; i < data.size(); i++) { + // We'll be deleting a_id=1 so remove it from the control data. + if (data.get(i).data[0].equals(new Integer(1))) { + continue; + } - newControlData.add(new ControlData(newDataRow1)); - newControlData.add(new ControlData(newDataRow2)); + // We'll be changing "Cookie" to "Carrot" in a_id=3 so change it in the control data. + if (data.get(i).data[0].equals(new Integer(3))) { + newIdEqualsThree = new Object[data.get(i).data.length]; + System.arraycopy(data.get(i).data, 0, newIdEqualsThree, 0, newIdEqualsThree.length); + newIdEqualsThree[3] = "Carrot"; + newControlData.add(new ControlData(newIdEqualsThree)); + } + // Everything else can just be copied over. + else { + newControlData.add(data.get(i)); + } - // Re-open the cache - cache.open(getContext(new HashMap())); + j++; + } - // Delete a_id=1 from the cache. - cache.delete(new Integer(1)); + // These new rows of data will get added to the cache, so add them to the control data too. + Object[] newDataRow1 = new Object[]{new Integer(99), new BigDecimal(Math.PI), "Z", "Zebra", new Float(99.99), Feb21_2011, null}; + Object[] newDataRow2 = new Object[]{new Integer(2), new BigDecimal(Math.PI), "B", "Ballerina", new Float(2.22), Feb21_2011, null}; - // Because the cache allows duplicates, the only way to update is to - // delete first then add. - cache.delete(new Integer(3)); - cache.add(controlDataToMap(new ControlData(newIdEqualsThree), fieldNames, false)); + newControlData.add(new ControlData(newDataRow1)); + newControlData.add(new ControlData(newDataRow2)); - // Add this row with a new Primary key. - cache.add(controlDataToMap(new ControlData(newDataRow1), fieldNames, false)); + // Re-open the cache + cache.open(getContext(new HashMap())); - // Add this row, creating two records in the cache with a_id=2. - cache.add(controlDataToMap(new ControlData(newDataRow2), fieldNames, false)); + // Delete a_id=1 from the cache. + cache.delete(new Integer(1)); - // Read the cache back and compare to the newControlData - List testData = extractDataInKeyOrder(cache, fieldNames); - compareData(newControlData, testData); + // Because the cache allows duplicates, the only way to update is to + // delete first then add. + cache.delete(new Integer(3)); + cache.add(controlDataToMap(new ControlData(newIdEqualsThree), fieldNames, false)); - // Now try reading the cache read-only. 
- cache.close(); - cache.open(getContext(new HashMap())); - testData = extractDataInKeyOrder(cache, fieldNames); - compareData(newControlData, testData); + // Add this row with a new Primary key. + cache.add(controlDataToMap(new ControlData(newDataRow1), fieldNames, false)); - } catch (Exception e) { - log.warn("Exception thrown: " + e.toString()); - Assert.fail(); - } finally { - try { - cache.destroy(); - } catch (Exception ex) { - } - } - } + // Add this row, creating two records in the cache with a_id=2. + cache.add(controlDataToMap(new ControlData(newDataRow2), fieldNames, false)); + + // Read the cache back and compare to the newControlData + List testData = extractDataInKeyOrder(cache, fieldNames); + compareData(newControlData, testData); + + // Now try reading the cache read-only. + cache.close(); + cache.open(getContext(new HashMap())); + testData = extractDataInKeyOrder(cache, fieldNames); + compareData(newControlData, testData); + + } catch (Exception e) { + log.warn("Exception thrown: " + e.toString()); + Assert.fail(); + } finally { + try { + cache.destroy(); + } catch (Exception ex) { + } + } + } } Index: solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EntityProcessorBase.java =================================================================== --- solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EntityProcessorBase.java (revision 1381159) +++ solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EntityProcessorBase.java (working copy) @@ -133,11 +133,11 @@ @Override public void destroy() { - query = null; - if(cacheSupport!=null){ - cacheSupport.destroyAll(); - } - cacheSupport = null; + query = null; + if(cacheSupport!=null){ + cacheSupport.destroyAll(); + } + cacheSupport = null; } Index: solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SolrWriter.java =================================================================== --- solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SolrWriter.java (revision 1381159) +++ solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SolrWriter.java (working copy) @@ -54,12 +54,12 @@ @Override public void close() { - try { - processor.finish(); - } catch (IOException e) { - throw new DataImportHandlerException(DataImportHandlerException.SEVERE, - "Unable to call finish() on UpdateRequestProcessor", e); - } + try { + processor.finish(); + } catch (IOException e) { + throw new DataImportHandlerException(DataImportHandlerException.SEVERE, + "Unable to call finish() on UpdateRequestProcessor", e); + } } @Override public boolean upload(SolrInputDocument d) { @@ -87,8 +87,8 @@ log.error("Exception while deleteing: " + id, e); } } - - @Override + + @Override public void deleteByQuery(String query) { try { log.info("Deleting documents from Solr with query: " + query); @@ -100,7 +100,7 @@ } } - @Override + @Override public void commit(boolean optimize) { try { CommitUpdateCommand commit = new CommitUpdateCommand(req,optimize); @@ -110,7 +110,7 @@ } } - @Override + @Override public void rollback() { try { RollbackUpdateCommand rollback = new RollbackUpdateCommand(req); @@ -120,7 +120,7 @@ } } - @Override + @Override public void doDeleteAll() { try { DeleteUpdateCommand deleteCommand = new DeleteUpdateCommand(req); @@ -158,8 +158,8 @@ return null; } } - @Override - public void init(Context context) { - /* NO-OP */ - } + @Override + public void init(Context context) { + /* NO-OP */ + } } Index: 
solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DIHLogLevels.java =================================================================== --- solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DIHLogLevels.java (revision 1381159) +++ solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DIHLogLevels.java (working copy) @@ -18,5 +18,5 @@ */ public enum DIHLogLevels { - START_ENTITY, END_ENTITY, TRANSFORMED_ROW, ENTITY_META, PRE_TRANSFORMER_ROW, START_DOC, END_DOC, ENTITY_OUT, ROW_END, TRANSFORMER_EXCEPTION, ENTITY_EXCEPTION, DISABLE_LOGGING, ENABLE_LOGGING, NONE + START_ENTITY, END_ENTITY, TRANSFORMED_ROW, ENTITY_META, PRE_TRANSFORMER_ROW, START_DOC, END_DOC, ENTITY_OUT, ROW_END, TRANSFORMER_EXCEPTION, ENTITY_EXCEPTION, DISABLE_LOGGING, ENABLE_LOGGING, NONE } Index: solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImporter.java =================================================================== --- solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImporter.java (revision 1381159) +++ solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImporter.java (working copy) @@ -393,7 +393,7 @@ } private void checkWritablePersistFile(SolrWriter writer) { -// File persistFile = propWriter.getPersistFile(); +// File persistFile = propWriter.getPersistFile(); // boolean isWritable = persistFile.exists() ? persistFile.canWrite() : persistFile.getParentFile().canWrite(); if (isDeltaImportSupported && !propWriter.isWritable()) { throw new DataImportHandlerException(SEVERE, Index: solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DIHWriter.java =================================================================== --- solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DIHWriter.java (revision 1381159) +++ solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DIHWriter.java (working copy) @@ -25,81 +25,81 @@ * */ public interface DIHWriter { - - /** - *
<p>
- * If this writer supports transactions or commit points, then commit any changes, - * optionally optimizing the data for read/write performance - *
</p>
- * @param optimize - */ - public void commit(boolean optimize); - - /** - *
<p>
- * Release resources used by this writer. After calling close, reads & updates will throw exceptions. - *
</p>
- */ - public void close(); - /** - *
<p>
- * If this writer supports transactions or commit points, then roll back any uncommitted changes. - *
</p>
- */ - public void rollback(); + /** + *
<p>
+ * If this writer supports transactions or commit points, then commit any changes, + * optionally optimizing the data for read/write performance + *
</p>
+ * @param optimize + */ + public void commit(boolean optimize); - /** - *
<p>
- * Delete from the writer's underlying data store based the passed-in writer-specific query. (Optional Operation) - *
</p>
- * @param q - */ - public void deleteByQuery(String q); + /** + *
<p>
+ * Release resources used by this writer. After calling close, reads & updates will throw exceptions. + *
</p>
+ */ + public void close(); - /** - *
<p>
- * Delete everything from the writer's underlying data store - *
</p>
- */ - public void doDeleteAll(); + /** + *
<p>
+ * If this writer supports transactions or commit points, then roll back any uncommitted changes. + *
</p>
+ */ + public void rollback(); - /** - *
<p>
- * Delete from the writer's underlying data store based on the passed-in Primary Key - *
</p>
- * @param key - */ - public void deleteDoc(Object key); - + /** + *
<p>
+ * Delete from the writer's underlying data store based on the passed-in writer-specific query. (Optional Operation) + *
</p>
+ * @param q + */ + public void deleteByQuery(String q); + /** + *
<p>
+ * Delete everything from the writer's underlying data store + *
</p>
+ */ + public void doDeleteAll(); - /** - *
<p>
- * Add a document to this writer's underlying data store. - *
</p>
- * @param doc - * @return true on success, false on failure - */ - public boolean upload(SolrInputDocument doc); + /** + *
<p>
+ * Delete from the writer's underlying data store based on the passed-in Primary Key + *
</p>
+ * @param key + */ + public void deleteDoc(Object key); - - /** - *
<p>
- * Provide context information for this writer. init() should be called before using the writer. - *
</p>
- * @param context - */ - public void init(Context context) ; - - /** - *
<p>
- * Specify the keys to be modified by a delta update (required by writers that can store duplicate keys) - *
</p>
- * @param deltaKeys - */ - public void setDeltaKeys(Set> deltaKeys) ; + /** + *
<p>
+ * Add a document to this writer's underlying data store. + *
</p>
+ * @param doc + * @return true on success, false on failure + */ + public boolean upload(SolrInputDocument doc); + + + /** + *
<p>
+ * Provide context information for this writer. init() should be called before using the writer. + *
</p>
+ * @param context + */ + public void init(Context context) ; + + + /** + *
<p>
+ * Specify the keys to be modified by a delta update (required by writers that can store duplicate keys) + *
</p>
+ * @param deltaKeys + */ + public void setDeltaKeys(Set> deltaKeys) ; + } Index: solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImportHandler.java =================================================================== --- solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImportHandler.java (revision 1381159) +++ solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImportHandler.java (working copy) @@ -178,7 +178,7 @@ rsp.add("mode", "debug"); rsp.add("documents", requestParams.getDebugInfo().debugDocuments); if (requestParams.getDebugInfo().debugVerboseOutput != null) { - rsp.add("verbose-output", requestParams.getDebugInfo().debugVerboseOutput); + rsp.add("verbose-output", requestParams.getDebugInfo().debugVerboseOutput); } } else { message = DataImporter.MSG.DEBUG_NOT_ENABLED; Index: solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SimplePropertiesWriter.java =================================================================== --- solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SimplePropertiesWriter.java (revision 1381159) +++ solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SimplePropertiesWriter.java (working copy) @@ -30,15 +30,15 @@ import org.slf4j.LoggerFactory; public class SimplePropertiesWriter implements DIHPropertiesWriter { - private static final Logger log = LoggerFactory.getLogger(SimplePropertiesWriter.class); + private static final Logger log = LoggerFactory.getLogger(SimplePropertiesWriter.class); - static final String IMPORTER_PROPERTIES = "dataimport.properties"; + static final String IMPORTER_PROPERTIES = "dataimport.properties"; - static final String LAST_INDEX_KEY = "last_index_time"; + static final String LAST_INDEX_KEY = "last_index_time"; - private String persistFilename = IMPORTER_PROPERTIES; + private String persistFilename = IMPORTER_PROPERTIES; - private String configDir = null; + private String configDir = null; @@ -48,15 +48,15 @@ String persistFileName = dataImporter.getHandlerName(); this.configDir = configDir; - if(persistFileName != null){ + if(persistFileName != null){ persistFilename = persistFileName + ".properties"; } } - - private File getPersistFile() { + + private File getPersistFile() { String filePath = configDir; if (configDir != null && !configDir.endsWith(File.separator)) filePath += File.separator; @@ -71,53 +71,53 @@ } @Override - public void persist(Properties p) { - OutputStream propOutput = null; + public void persist(Properties p) { + OutputStream propOutput = null; - Properties props = readIndexerProperties(); + Properties props = readIndexerProperties(); - try { - props.putAll(p); - String filePath = configDir; - if (configDir != null && !configDir.endsWith(File.separator)) - filePath += File.separator; - filePath += persistFilename; - propOutput = new FileOutputStream(filePath); - props.store(propOutput, null); - log.info("Wrote last indexed time to " + persistFilename); - } catch (Exception e) { - throw new DataImportHandlerException(DataImportHandlerException.SEVERE, "Unable to persist Index Start Time", e); - } finally { - try { - if (propOutput != null) - propOutput.close(); - } catch (IOException e) { - propOutput = null; - } - } - } + try { + props.putAll(p); + String filePath = configDir; + if (configDir != null && !configDir.endsWith(File.separator)) + filePath += File.separator; + filePath += persistFilename; + propOutput = new FileOutputStream(filePath); + 
props.store(propOutput, null); + log.info("Wrote last indexed time to " + persistFilename); + } catch (Exception e) { + throw new DataImportHandlerException(DataImportHandlerException.SEVERE, "Unable to persist Index Start Time", e); + } finally { + try { + if (propOutput != null) + propOutput.close(); + } catch (IOException e) { + propOutput = null; + } + } + } - @Override - public Properties readIndexerProperties() { - Properties props = new Properties(); - InputStream propInput = null; + @Override + public Properties readIndexerProperties() { + Properties props = new Properties(); + InputStream propInput = null; - try { - propInput = new FileInputStream(configDir + persistFilename); - props.load(propInput); - log.info("Read " + persistFilename); - } catch (Exception e) { - log.warn("Unable to read: " + persistFilename); - } finally { - try { - if (propInput != null) - propInput.close(); - } catch (IOException e) { - propInput = null; - } - } + try { + propInput = new FileInputStream(configDir + persistFilename); + props.load(propInput); + log.info("Read " + persistFilename); + } catch (Exception e) { + log.warn("Unable to read: " + persistFilename); + } finally { + try { + if (propInput != null) + propInput.close(); + } catch (IOException e) { + propInput = null; + } + } - return props; - } + return props; + } } Index: solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DocBuilder.java =================================================================== --- solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DocBuilder.java (revision 1381159) +++ solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DocBuilder.java (working copy) @@ -89,17 +89,17 @@ String writerClassStr = null; if(reqParams!=null && reqParams.getRawParams() != null) { - writerClassStr = (String) reqParams.getRawParams().get(PARAM_WRITER_IMPL); + writerClassStr = (String) reqParams.getRawParams().get(PARAM_WRITER_IMPL); } if(writerClassStr != null && !writerClassStr.equals(DEFAULT_WRITER_NAME) && !writerClassStr.equals(DocBuilder.class.getPackage().getName() + "." 
+ DEFAULT_WRITER_NAME)) { - try { - Class writerClass = loadClass(writerClassStr, dataImporter.getCore()); - this.writer = writerClass.newInstance(); - } catch (Exception e) { - throw new DataImportHandlerException(DataImportHandlerException.SEVERE, "Unable to load Writer implementation:" + writerClassStr, e); - } - } else { - writer = solrWriter; + try { + Class writerClass = loadClass(writerClassStr, dataImporter.getCore()); + this.writer = writerClass.newInstance(); + } catch (Exception e) { + throw new DataImportHandlerException(DataImportHandlerException.SEVERE, "Unable to load Writer implementation:" + writerClassStr, e); + } + } else { + writer = solrWriter; } ContextImpl ctx = new ContextImpl(null, null, null, null, reqParams.getRawParams(), null, this); writer.init(ctx); @@ -178,111 +178,111 @@ @SuppressWarnings("unchecked") public void execute() { List epwList = null; - try { - dataImporter.store(DataImporter.STATUS_MSGS, statusMessages); - config = dataImporter.getConfig(); - final AtomicLong startTime = new AtomicLong(System.currentTimeMillis()); - statusMessages.put(TIME_ELAPSED, new Object() { - @Override - public String toString() { - return getTimeElapsedSince(startTime.get()); - } - }); - - statusMessages.put(DataImporter.MSG.TOTAL_QUERIES_EXECUTED, - importStatistics.queryCount); - statusMessages.put(DataImporter.MSG.TOTAL_ROWS_EXECUTED, - importStatistics.rowsCount); - statusMessages.put(DataImporter.MSG.TOTAL_DOC_PROCESSED, - importStatistics.docCount); - statusMessages.put(DataImporter.MSG.TOTAL_DOCS_SKIPPED, - importStatistics.skipDocCount); - - List entities = reqParams.getEntitiesToRun(); - - // Trigger onImportStart - if (config.getOnImportStart() != null) { - invokeEventListener(config.getOnImportStart()); - } - AtomicBoolean fullCleanDone = new AtomicBoolean(false); - //we must not do a delete of *:* multiple times if there are multiple root entities to be run - Properties lastIndexTimeProps = new Properties(); - lastIndexTimeProps.setProperty(LAST_INDEX_KEY, - DataImporter.DATE_TIME_FORMAT.get().format(dataImporter.getIndexStartTime())); - - epwList = new ArrayList(config.getEntities().size()); - for (Entity e : config.getEntities()) { - epwList.add(getEntityProcessorWrapper(e)); - } - for (EntityProcessorWrapper epw : epwList) { - if (entities != null && !entities.contains(epw.getEntity().getName())) - continue; - lastIndexTimeProps.setProperty(epw.getEntity().getName() + "." 
+ LAST_INDEX_KEY, - DataImporter.DATE_TIME_FORMAT.get().format(new Date())); - currentEntityProcessorWrapper = epw; - String delQuery = epw.getEntity().getAllAttributes().get("preImportDeleteQuery"); - if (dataImporter.getStatus() == DataImporter.Status.RUNNING_DELTA_DUMP) { - cleanByQuery(delQuery, fullCleanDone); - doDelta(); - delQuery = epw.getEntity().getAllAttributes().get("postImportDeleteQuery"); - if (delQuery != null) { - fullCleanDone.set(false); - cleanByQuery(delQuery, fullCleanDone); - } - } else { - cleanByQuery(delQuery, fullCleanDone); - doFullDump(); - delQuery = epw.getEntity().getAllAttributes().get("postImportDeleteQuery"); - if (delQuery != null) { - fullCleanDone.set(false); - cleanByQuery(delQuery, fullCleanDone); - } - } - statusMessages.remove(DataImporter.MSG.TOTAL_DOC_PROCESSED); - } - - if (stop.get()) { - // Dont commit if aborted using command=abort - statusMessages.put("Aborted", DataImporter.DATE_TIME_FORMAT.get().format(new Date())); - rollback(); - } else { - // Do not commit unnecessarily if this is a delta-import and no documents were created or deleted - if (!reqParams.isClean()) { - if (importStatistics.docCount.get() > 0 || importStatistics.deletedDocCount.get() > 0) { - finish(lastIndexTimeProps); - } - } else { - // Finished operation normally, commit now - finish(lastIndexTimeProps); - } - - if (config.getOnImportEnd() != null) { - invokeEventListener(config.getOnImportEnd()); - } - } - - statusMessages.remove(TIME_ELAPSED); - statusMessages.put(DataImporter.MSG.TOTAL_DOC_PROCESSED, ""+ importStatistics.docCount.get()); - if(importStatistics.failedDocCount.get() > 0) - statusMessages.put(DataImporter.MSG.TOTAL_FAILED_DOCS, ""+ importStatistics.failedDocCount.get()); - - statusMessages.put("Time taken", getTimeElapsedSince(startTime.get())); - LOG.info("Time taken = " + getTimeElapsedSince(startTime.get())); - } catch(Exception e) - { - throw new RuntimeException(e); - } finally - { - if (writer != null) { - writer.close(); - } - if (epwList != null) { - closeEntityProcessorWrappers(epwList); - } - if(reqParams.isDebug()) { - reqParams.getDebugInfo().debugVerboseOutput = getDebugLogger().output; - } - } + try { + dataImporter.store(DataImporter.STATUS_MSGS, statusMessages); + config = dataImporter.getConfig(); + final AtomicLong startTime = new AtomicLong(System.currentTimeMillis()); + statusMessages.put(TIME_ELAPSED, new Object() { + @Override + public String toString() { + return getTimeElapsedSince(startTime.get()); + } + }); + + statusMessages.put(DataImporter.MSG.TOTAL_QUERIES_EXECUTED, + importStatistics.queryCount); + statusMessages.put(DataImporter.MSG.TOTAL_ROWS_EXECUTED, + importStatistics.rowsCount); + statusMessages.put(DataImporter.MSG.TOTAL_DOC_PROCESSED, + importStatistics.docCount); + statusMessages.put(DataImporter.MSG.TOTAL_DOCS_SKIPPED, + importStatistics.skipDocCount); + + List entities = reqParams.getEntitiesToRun(); + + // Trigger onImportStart + if (config.getOnImportStart() != null) { + invokeEventListener(config.getOnImportStart()); + } + AtomicBoolean fullCleanDone = new AtomicBoolean(false); + //we must not do a delete of *:* multiple times if there are multiple root entities to be run + Properties lastIndexTimeProps = new Properties(); + lastIndexTimeProps.setProperty(LAST_INDEX_KEY, + DataImporter.DATE_TIME_FORMAT.get().format(dataImporter.getIndexStartTime())); + + epwList = new ArrayList(config.getEntities().size()); + for (Entity e : config.getEntities()) { + epwList.add(getEntityProcessorWrapper(e)); + } + for 
(EntityProcessorWrapper epw : epwList) { + if (entities != null && !entities.contains(epw.getEntity().getName())) + continue; + lastIndexTimeProps.setProperty(epw.getEntity().getName() + "." + LAST_INDEX_KEY, + DataImporter.DATE_TIME_FORMAT.get().format(new Date())); + currentEntityProcessorWrapper = epw; + String delQuery = epw.getEntity().getAllAttributes().get("preImportDeleteQuery"); + if (dataImporter.getStatus() == DataImporter.Status.RUNNING_DELTA_DUMP) { + cleanByQuery(delQuery, fullCleanDone); + doDelta(); + delQuery = epw.getEntity().getAllAttributes().get("postImportDeleteQuery"); + if (delQuery != null) { + fullCleanDone.set(false); + cleanByQuery(delQuery, fullCleanDone); + } + } else { + cleanByQuery(delQuery, fullCleanDone); + doFullDump(); + delQuery = epw.getEntity().getAllAttributes().get("postImportDeleteQuery"); + if (delQuery != null) { + fullCleanDone.set(false); + cleanByQuery(delQuery, fullCleanDone); + } + } + statusMessages.remove(DataImporter.MSG.TOTAL_DOC_PROCESSED); + } + + if (stop.get()) { + // Dont commit if aborted using command=abort + statusMessages.put("Aborted", DataImporter.DATE_TIME_FORMAT.get().format(new Date())); + rollback(); + } else { + // Do not commit unnecessarily if this is a delta-import and no documents were created or deleted + if (!reqParams.isClean()) { + if (importStatistics.docCount.get() > 0 || importStatistics.deletedDocCount.get() > 0) { + finish(lastIndexTimeProps); + } + } else { + // Finished operation normally, commit now + finish(lastIndexTimeProps); + } + + if (config.getOnImportEnd() != null) { + invokeEventListener(config.getOnImportEnd()); + } + } + + statusMessages.remove(TIME_ELAPSED); + statusMessages.put(DataImporter.MSG.TOTAL_DOC_PROCESSED, ""+ importStatistics.docCount.get()); + if(importStatistics.failedDocCount.get() > 0) + statusMessages.put(DataImporter.MSG.TOTAL_FAILED_DOCS, ""+ importStatistics.failedDocCount.get()); + + statusMessages.put("Time taken", getTimeElapsedSince(startTime.get())); + LOG.info("Time taken = " + getTimeElapsedSince(startTime.get())); + } catch(Exception e) + { + throw new RuntimeException(e); + } finally + { + if (writer != null) { + writer.close(); + } + if (epwList != null) { + closeEntityProcessorWrappers(epwList); + } + if(reqParams.isDebug()) { + reqParams.getDebugInfo().debugVerboseOutput = getDebugLogger().output; + } + } } private void closeEntityProcessorWrappers(List epwList) { for(EntityProcessorWrapper epw : epwList) { @@ -506,7 +506,7 @@ if (!doc.isEmpty()) { boolean result = writer.upload(doc); if(reqParams.isDebug()) { - reqParams.getDebugInfo().debugDocuments.add(doc); + reqParams.getDebugInfo().debugDocuments.add(doc); } doc = null; if (result){ Index: solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DebugLogger.java =================================================================== --- solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DebugLogger.java (revision 1381159) +++ solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DebugLogger.java (working copy) @@ -143,7 +143,7 @@ private void popAllTransformers() { while (true) { - DIHLogLevels type = debugStack.peek().type; + DIHLogLevels type = debugStack.peek().type; if (type == DIHLogLevels.START_DOC || type == DIHLogLevels.START_ENTITY) break; debugStack.pop(); Index: solr/contrib/uima/src/test/org/apache/solr/uima/ts/SentimentAnnotation_Type.java =================================================================== --- 
solr/contrib/uima/src/test/org/apache/solr/uima/ts/SentimentAnnotation_Type.java (revision 1381159) +++ solr/contrib/uima/src/test/org/apache/solr/uima/ts/SentimentAnnotation_Type.java (working copy) @@ -23,17 +23,17 @@ private final FSGenerator fsGenerator = new FSGenerator() { public FeatureStructure createFS(int addr, CASImpl cas) { - if (SentimentAnnotation_Type.this.useExistingInstance) { - // Return eq fs instance if already created - FeatureStructure fs = SentimentAnnotation_Type.this.jcas.getJfsFromCaddr(addr); - if (null == fs) { - fs = new SentimentAnnotation(addr, SentimentAnnotation_Type.this); - SentimentAnnotation_Type.this.jcas.putJfsFromCaddr(addr, fs); - return fs; - } - return fs; + if (SentimentAnnotation_Type.this.useExistingInstance) { + // Return eq fs instance if already created + FeatureStructure fs = SentimentAnnotation_Type.this.jcas.getJfsFromCaddr(addr); + if (null == fs) { + fs = new SentimentAnnotation(addr, SentimentAnnotation_Type.this); + SentimentAnnotation_Type.this.jcas.putJfsFromCaddr(addr, fs); + return fs; + } + return fs; } else return new SentimentAnnotation(addr, SentimentAnnotation_Type.this); - } + } }; /** @generated */ public final static int typeIndexID = SentimentAnnotation.typeIndexID; @@ -62,7 +62,7 @@ /** initialize variables to correspond with Cas Type and Features - * @generated */ + * @generated */ public SentimentAnnotation_Type(JCas jcas, Type casType) { super(jcas, casType); casImpl.getFSClassRegistry().addGeneratorForType((TypeImpl)this.casType, getFSGenerator()); Index: solr/contrib/uima/src/test/org/apache/solr/uima/ts/EntityAnnotation_Type.java =================================================================== --- solr/contrib/uima/src/test/org/apache/solr/uima/ts/EntityAnnotation_Type.java (revision 1381159) +++ solr/contrib/uima/src/test/org/apache/solr/uima/ts/EntityAnnotation_Type.java (working copy) @@ -23,17 +23,17 @@ private final FSGenerator fsGenerator = new FSGenerator() { public FeatureStructure createFS(int addr, CASImpl cas) { - if (EntityAnnotation_Type.this.useExistingInstance) { - // Return eq fs instance if already created - FeatureStructure fs = EntityAnnotation_Type.this.jcas.getJfsFromCaddr(addr); - if (null == fs) { - fs = new EntityAnnotation(addr, EntityAnnotation_Type.this); - EntityAnnotation_Type.this.jcas.putJfsFromCaddr(addr, fs); - return fs; - } - return fs; + if (EntityAnnotation_Type.this.useExistingInstance) { + // Return eq fs instance if already created + FeatureStructure fs = EntityAnnotation_Type.this.jcas.getJfsFromCaddr(addr); + if (null == fs) { + fs = new EntityAnnotation(addr, EntityAnnotation_Type.this); + EntityAnnotation_Type.this.jcas.putJfsFromCaddr(addr, fs); + return fs; + } + return fs; } else return new EntityAnnotation(addr, EntityAnnotation_Type.this); - } + } }; /** @generated */ public final static int typeIndexID = EntityAnnotation.typeIndexID; @@ -80,7 +80,7 @@ /** initialize variables to correspond with Cas Type and Features - * @generated */ + * @generated */ public EntityAnnotation_Type(JCas jcas, Type casType) { super(jcas, casType); casImpl.getFSClassRegistry().addGeneratorForType((TypeImpl)this.casType, getFSGenerator()); Index: solr/contrib/velocity/src/java/org/apache/solr/response/PageTool.java =================================================================== --- solr/contrib/velocity/src/java/org/apache/solr/response/PageTool.java (revision 1381159) +++ solr/contrib/velocity/src/java/org/apache/solr/response/PageTool.java (working copy) @@ -58,7 +58,7 @@ 
results_found = doc_list.getNumFound(); start = doc_list.getStart(); } else { - throw new SolrException(SolrException.ErrorCode.UNKNOWN, "Unknown response type "+docs+". Expected one of DocSlice, ResultContext or SolrDocumentList"); + throw new SolrException(SolrException.ErrorCode.UNKNOWN, "Unknown response type "+docs+". Expected one of DocSlice, ResultContext or SolrDocumentList"); } } Index: lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyReader.java =================================================================== --- lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyReader.java (revision 1381159) +++ lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyReader.java (working copy) @@ -356,7 +356,7 @@ // only possible writer, and it is "synchronized" to avoid this case). DirectoryReader r2 = DirectoryReader.openIfChanged(indexReader); if (r2 == null) { - return false; // no changes, nothing to do + return false; // no changes, nothing to do } // validate that a refresh is valid at this point, i.e. that the taxonomy @@ -364,13 +364,13 @@ String t1 = indexReader.getIndexCommit().getUserData().get(DirectoryTaxonomyWriter.INDEX_CREATE_TIME); String t2 = r2.getIndexCommit().getUserData().get(DirectoryTaxonomyWriter.INDEX_CREATE_TIME); if (t1==null) { - if (t2!=null) { - r2.close(); - throw new InconsistentTaxonomyException("Taxonomy was recreated at: "+t2); - } + if (t2!=null) { + r2.close(); + throw new InconsistentTaxonomyException("Taxonomy was recreated at: "+t2); + } } else if (!t1.equals(t2)) { - r2.close(); - throw new InconsistentTaxonomyException("Taxonomy was recreated at: "+t2+" != "+t1); + r2.close(); + throw new InconsistentTaxonomyException("Taxonomy was recreated at: "+t2+" != "+t1); } IndexReader oldreader = indexReader; Index: lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/TestQueryTemplateManager.java =================================================================== --- lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/TestQueryTemplateManager.java (revision 1381159) +++ lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/TestQueryTemplateManager.java (working copy) @@ -89,7 +89,7 @@ Properties queryFormProperties = getPropsFromString(queryForm); //Get the required query XSL template for this test -// Templates template=getTemplate(queryFormProperties.getProperty("template")); +// Templates template=getTemplate(queryFormProperties.getProperty("template")); //Transform the queryFormProperties into a Lucene XML query Document doc = qtm.getQueryAsDOM(queryFormProperties, queryFormProperties.getProperty("template")); Index: lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/DOMUtils.java =================================================================== --- lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/DOMUtils.java (revision 1381159) +++ lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/DOMUtils.java (working copy) @@ -212,7 +212,7 @@ } return doc; - } + } } Index: lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CoreParser.java =================================================================== --- lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CoreParser.java (revision 1381159) +++ lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CoreParser.java (working copy) @@ -153,5 +153,5 @@ public Query getQuery(Element e) throws ParserException { return 
queryFactory.getQuery(e); - } + } } Index: lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CorePlusExtensionsParser.java =================================================================== --- lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CorePlusExtensionsParser.java (revision 1381159) +++ lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CorePlusExtensionsParser.java (working copy) @@ -58,6 +58,6 @@ queryFactory.addBuilder("LikeThisQuery", new LikeThisQueryBuilder(analyzer, fields)); queryFactory.addBuilder("BoostingQuery", new BoostingQueryBuilder(queryFactory)); queryFactory.addBuilder("FuzzyLikeThisQuery", new FuzzyLikeThisQueryBuilder(analyzer)); - - } + + } } Index: lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/CachedFilterBuilder.java =================================================================== --- lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/CachedFilterBuilder.java (revision 1381159) +++ lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/CachedFilterBuilder.java (working copy) @@ -32,10 +32,10 @@ * Filters are cached in an LRU Cache keyed on the contained query or filter object. Using this will * speed up overall performance for repeated uses of the same expensive query/filter. The sorts of * queries/filters likely to benefit from caching need not necessarily be complex - e.g. simple - * TermQuerys with a large DF (document frequency) can be expensive on large indexes. - * A good example of this might be a term query on a field with only 2 possible values - + * TermQuerys with a large DF (document frequency) can be expensive on large indexes. + * A good example of this might be a term query on a field with only 2 possible values - * "true" or "false". In a large index, querying or filtering on this field requires reading - * millions of document ids from disk which can more usefully be cached as a filter bitset. + * millions of document ids from disk which can more usefully be cached as a filter bitset. *
<p>
* For Queries/Filters to be cached and reused the object must implement hashcode and * equals methods correctly so that duplicate queries/filters can be detected in the cache. Index: lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/UserInputQueryBuilder.java =================================================================== --- lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/UserInputQueryBuilder.java (revision 1381159) +++ lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/UserInputQueryBuilder.java (working copy) @@ -88,6 +88,6 @@ */ protected QueryParser createQueryParser(String fieldName, Analyzer analyzer) { return new QueryParser(Version.LUCENE_CURRENT, fieldName, analyzer); - } + } } Index: lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/NumericRangeFilterBuilder.java =================================================================== --- lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/NumericRangeFilterBuilder.java (revision 1381159) +++ lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/NumericRangeFilterBuilder.java (working copy) @@ -158,7 +158,7 @@ @Override public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException { return null; - } + } - } + } } Index: lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanQueryBuilder.java =================================================================== --- lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanQueryBuilder.java (revision 1381159) +++ lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanQueryBuilder.java (working copy) @@ -24,6 +24,6 @@ * Interface for retrieving a {@link SpanQuery}. 
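As an illustration of the hashcode/equals requirement described in the CachedFilterBuilder javadoc above, here is a minimal sketch of a cache-friendly custom filter. It assumes the Lucene 4.x Filter API already visible elsewhere in this patch; the NoMatchFieldFilter class and its field member are hypothetical and are not part of the patch:

import java.io.IOException;

import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.Bits;

// Hypothetical filter that matches no documents; the point of the sketch is
// the equals/hashCode contract, which lets an LRU cache detect duplicates.
public class NoMatchFieldFilter extends Filter {
  private final String field;

  public NoMatchFieldFilter(String field) {
    this.field = field;
  }

  @Override
  public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
    return null; // null means "no documents match" in Lucene 4.x
  }

  @Override
  public boolean equals(Object obj) {
    return obj instanceof NoMatchFieldFilter
        && field.equals(((NoMatchFieldFilter) obj).field);
  }

  @Override
  public int hashCode() {
    return NoMatchFieldFilter.class.hashCode() ^ field.hashCode();
  }
}

Two instances built for the same field compare equal, so a cache keyed on the filter object can return the previously computed bitset instead of rebuilding it.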
*/ public interface SpanQueryBuilder extends QueryBuilder { - - public SpanQuery getSpanQuery(Element e) throws ParserException; + + public SpanQuery getSpanQuery(Element e) throws ParserException; } Index: lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/ParserException.java =================================================================== --- lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/ParserException.java (revision 1381159) +++ lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/ParserException.java (working copy) @@ -25,32 +25,32 @@ */ public class ParserException extends Exception { - /** - * - */ - public ParserException() { - super(); - } + /** + * + */ + public ParserException() { + super(); + } - /** - * @param message - */ - public ParserException(String message) { - super(message); - } + /** + * @param message + */ + public ParserException(String message) { + super(message); + } - /** - * @param message - * @param cause - */ - public ParserException(String message, Throwable cause) { - super(message, cause); - } + /** + * @param message + * @param cause + */ + public ParserException(String message, Throwable cause) { + super(message, cause); + } - /** - * @param cause - */ - public ParserException(Throwable cause) { - super(cause); - } + /** + * @param cause + */ + public ParserException(Throwable cause) { + super(cause); + } } Index: lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/FilterBuilder.java =================================================================== --- lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/FilterBuilder.java (revision 1381159) +++ lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/FilterBuilder.java (working copy) @@ -27,5 +27,5 @@ */ public interface FilterBuilder { - public Filter getFilter(Element e) throws ParserException; + public Filter getFilter(Element e) throws ParserException; } Index: lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/QueryBuilder.java =================================================================== --- lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/QueryBuilder.java (revision 1381159) +++ lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/QueryBuilder.java (working copy) @@ -23,6 +23,6 @@ * expected to be thread-safe so that they can be used to simultaneously parse multiple XML documents. 
*/ public interface QueryBuilder { - - public Query getQuery(Element e) throws ParserException; + + public Query getQuery(Element e) throws ParserException; } Index: lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/QueryTemplateManager.java =================================================================== --- lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/QueryTemplateManager.java (revision 1381159) +++ lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/QueryTemplateManager.java (working copy) @@ -199,5 +199,5 @@ org.w3c.dom.Document xslDoc = builder.parse(xslIs); DOMSource ds = new DOMSource(xslDoc); return tFactory.newTemplates(ds); - } + } } Index: lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/FastCharStream.java =================================================================== --- lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/FastCharStream.java (revision 1381159) +++ lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/FastCharStream.java (working copy) @@ -29,13 +29,13 @@ public final class FastCharStream implements CharStream { char[] buffer = null; - int bufferLength = 0; // end of valid chars - int bufferPosition = 0; // next char to read + int bufferLength = 0; // end of valid chars + int bufferPosition = 0; // next char to read - int tokenStart = 0; // offset in buffer - int bufferStart = 0; // position in file of buffer + int tokenStart = 0; // offset in buffer + int bufferStart = 0; // position in file of buffer - Reader input; // source of chars + Reader input; // source of chars /** Constructs from a Reader. */ public FastCharStream(Reader r) { @@ -51,24 +51,24 @@ private final void refill() throws IOException { int newPosition = bufferLength - tokenStart; - if (tokenStart == 0) { // token won't fit in buffer - if (buffer == null) { // first time: alloc buffer - buffer = new char[2048]; + if (tokenStart == 0) { // token won't fit in buffer + if (buffer == null) { // first time: alloc buffer + buffer = new char[2048]; } else if (bufferLength == buffer.length) { // grow buffer - char[] newBuffer = new char[buffer.length*2]; - System.arraycopy(buffer, 0, newBuffer, 0, bufferLength); - buffer = newBuffer; + char[] newBuffer = new char[buffer.length*2]; + System.arraycopy(buffer, 0, newBuffer, 0, bufferLength); + buffer = newBuffer; } - } else { // shift token to front + } else { // shift token to front System.arraycopy(buffer, tokenStart, buffer, 0, newPosition); } - bufferLength = newPosition; // update state + bufferLength = newPosition; // update state bufferPosition = newPosition; bufferStart += tokenStart; tokenStart = 0; - int charsRead = // fill space in buffer + int charsRead = // fill space in buffer input.read(buffer, newPosition, buffer.length-newPosition); if (charsRead == -1) throw new IOException("read past eof"); Index: lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/BasicQueryFactory.java =================================================================== --- lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/BasicQueryFactory.java (revision 1381159) +++ lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/BasicQueryFactory.java (working copy) @@ -47,9 +47,9 @@ public String toString() { return getClass().getName() - + "(maxBasicQueries: " + maxBasicQueries - + ", queriesMade: " + queriesMade - + ")"; + + "(maxBasicQueries: " + maxBasicQueries + + ", queriesMade: " + queriesMade + + ")"; } private 
boolean atMax() { Index: lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/RewriteQuery.java =================================================================== --- lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/RewriteQuery.java (revision 1381159) +++ lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/RewriteQuery.java (working copy) @@ -45,19 +45,19 @@ @Override public String toString(String field) { return getClass().getName() - + (field == null ? "" : "(unused: " + field + ")") - + "(" + fieldName - + ", " + srndQuery.toString() - + ", " + qf.toString() - + ")"; + + (field == null ? "" : "(unused: " + field + ")") + + "(" + fieldName + + ", " + srndQuery.toString() + + ", " + qf.toString() + + ")"; } @Override public int hashCode() { return getClass().hashCode() - ^ fieldName.hashCode() - ^ qf.hashCode() - ^ srndQuery.hashCode(); + ^ fieldName.hashCode() + ^ qf.hashCode() + ^ srndQuery.hashCode(); } @Override @@ -68,8 +68,8 @@ return false; RewriteQuery other = (RewriteQuery)obj; return fieldName.equals(other.fieldName) - && qf.equals(other.qf) - && srndQuery.equals(other.srndQuery); + && qf.equals(other.qf) + && srndQuery.equals(other.srndQuery); } /** @throws UnsupportedOperationException */ Index: lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SimpleTermRewriteQuery.java =================================================================== --- lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SimpleTermRewriteQuery.java (revision 1381159) +++ lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SimpleTermRewriteQuery.java (working copy) @@ -37,16 +37,16 @@ public Query rewrite(IndexReader reader) throws IOException { final List luceneSubQueries = new ArrayList(); srndQuery.visitMatchingTerms(reader, fieldName, - new SimpleTerm.MatchingTermVisitor() { - public void visitMatchingTerm(Term term) throws IOException { - luceneSubQueries.add(qf.newTermQuery(term)); - } - }); + new SimpleTerm.MatchingTermVisitor() { + public void visitMatchingTerm(Term term) throws IOException { + luceneSubQueries.add(qf.newTermQuery(term)); + } + }); return (luceneSubQueries.size() == 0) ? SrndQuery.theEmptyLcnQuery - : (luceneSubQueries.size() == 1) ? luceneSubQueries.get(0) - : SrndBooleanQuery.makeBooleanQuery( - /* luceneSubQueries all have default weight */ - luceneSubQueries, BooleanClause.Occur.SHOULD); /* OR the subquery terms */ + : (luceneSubQueries.size() == 1) ? 
luceneSubQueries.get(0) + : SrndBooleanQuery.makeBooleanQuery( + /* luceneSubQueries all have default weight */ + luceneSubQueries, BooleanClause.Occur.SHOULD); /* OR the subquery terms */ } } Index: lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/FastCharStream.java =================================================================== --- lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/FastCharStream.java (revision 1381159) +++ lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/FastCharStream.java (working copy) @@ -26,13 +26,13 @@ public final class FastCharStream implements CharStream { char[] buffer = null; - int bufferLength = 0; // end of valid chars - int bufferPosition = 0; // next char to read + int bufferLength = 0; // end of valid chars + int bufferPosition = 0; // next char to read - int tokenStart = 0; // offset in buffer - int bufferStart = 0; // position in file of buffer + int tokenStart = 0; // offset in buffer + int bufferStart = 0; // position in file of buffer - Reader input; // source of chars + Reader input; // source of chars /** Constructs from a Reader. */ public FastCharStream(Reader r) { @@ -48,24 +48,24 @@ private final void refill() throws IOException { int newPosition = bufferLength - tokenStart; - if (tokenStart == 0) { // token won't fit in buffer - if (buffer == null) { // first time: alloc buffer - buffer = new char[2048]; + if (tokenStart == 0) { // token won't fit in buffer + if (buffer == null) { // first time: alloc buffer + buffer = new char[2048]; } else if (bufferLength == buffer.length) { // grow buffer - char[] newBuffer = new char[buffer.length*2]; - System.arraycopy(buffer, 0, newBuffer, 0, bufferLength); - buffer = newBuffer; + char[] newBuffer = new char[buffer.length * 2]; + System.arraycopy(buffer, 0, newBuffer, 0, bufferLength); + buffer = newBuffer; } - } else { // shift token to front + } else { // shift token to front System.arraycopy(buffer, tokenStart, buffer, 0, newPosition); } - bufferLength = newPosition; // update state + bufferLength = newPosition; // update state bufferPosition = newPosition; bufferStart += tokenStart; tokenStart = 0; - int charsRead = // fill space in buffer + int charsRead = // fill space in buffer input.read(buffer, newPosition, buffer.length-newPosition); if (charsRead == -1) throw new IOException("read past eof"); Index: lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/StandardSyntaxParser.java =================================================================== --- lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/StandardSyntaxParser.java (revision 1381159) +++ lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/StandardSyntaxParser.java (working copy) @@ -272,27 +272,27 @@ // if (mods == ModifierQueryNode.Modifier.MOD_NONE) firstQuery=q; // // // do not create modifier nodes with MOD_NONE -// if (mods != ModifierQueryNode.Modifier.MOD_NONE) { -// q = new ModifierQueryNode(q, mods); -// } -// clauses.add(q); +// if (mods != ModifierQueryNode.Modifier.MOD_NONE) { +// q = new ModifierQueryNode(q, mods); +// } +// clauses.add(q); // } // ( // conj=Conjunction() mods=Modifiers() q=Clause(field) // { -// // do not create modifier nodes with MOD_NONE -// if (mods != ModifierQueryNode.Modifier.MOD_NONE) { -// q = new ModifierQueryNode(q, mods); -// } -// clauses.add(q); -// //TODO: figure out what to do with AND and ORs +// // do not create 
modifier nodes with MOD_NONE +// if (mods != ModifierQueryNode.Modifier.MOD_NONE) { +// q = new ModifierQueryNode(q, mods); +// } +// clauses.add(q); +// //TODO: figure out what to do with AND and ORs // } // )* // { // if (clauses.size() == 1 && firstQuery != null) // return firstQuery; // else { -// return new BooleanQueryNode(clauses); +// return new BooleanQueryNode(clauses); // } // } // } @@ -690,8 +690,8 @@ } catch (Exception ignored) { /* Should this be handled somehow? (defaults to "no PhraseSlop", if - * slop number is invalid) - */ + * slop number is invalid) + */ } } break; @@ -710,8 +710,8 @@ } } catch (Exception ignored) { /* Should this be handled somehow? (defaults to "no boost", if - * boost number is invalid) - */ + * boost number is invalid) + */ } } {if (true) return q;} Index: lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/FastCharStream.java =================================================================== --- lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/FastCharStream.java (revision 1381159) +++ lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/FastCharStream.java (working copy) @@ -29,13 +29,13 @@ public final class FastCharStream implements CharStream { char[] buffer = null; - int bufferLength = 0; // end of valid chars - int bufferPosition = 0; // next char to read + int bufferLength = 0; // end of valid chars + int bufferPosition = 0; // next char to read - int tokenStart = 0; // offset in buffer - int bufferStart = 0; // position in file of buffer + int tokenStart = 0; // offset in buffer + int bufferStart = 0; // position in file of buffer - Reader input; // source of chars + Reader input; // source of chars /** Constructs from a Reader. 
*/ public FastCharStream(Reader r) { @@ -51,24 +51,24 @@ private final void refill() throws IOException { int newPosition = bufferLength - tokenStart; - if (tokenStart == 0) { // token won't fit in buffer - if (buffer == null) { // first time: alloc buffer - buffer = new char[2048]; + if (tokenStart == 0) { // token won't fit in buffer + if (buffer == null) { // first time: alloc buffer + buffer = new char[2048]; } else if (bufferLength == buffer.length) { // grow buffer - char[] newBuffer = new char[buffer.length*2]; - System.arraycopy(buffer, 0, newBuffer, 0, bufferLength); - buffer = newBuffer; + char[] newBuffer = new char[buffer.length * 2]; + System.arraycopy(buffer, 0, newBuffer, 0, bufferLength); + buffer = newBuffer; } - } else { // shift token to front + } else { // shift token to front System.arraycopy(buffer, tokenStart, buffer, 0, newPosition); } - bufferLength = newPosition; // update state + bufferLength = newPosition; // update state bufferPosition = newPosition; bufferStart += tokenStart; tokenStart = 0; - int charsRead = // fill space in buffer + int charsRead = // fill space in buffer input.read(buffer, newPosition, buffer.length-newPosition); if (charsRead == -1) throw new IOException("read past eof"); Index: lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/FieldQueryNode.java =================================================================== --- lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/FieldQueryNode.java (revision 1381159) +++ lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/FieldQueryNode.java (working copy) @@ -179,12 +179,12 @@ } - public CharSequence getValue() { - return getText(); - } + public CharSequence getValue() { + return getText(); + } - public void setValue(CharSequence value) { - setText(value); - } + public void setValue(CharSequence value) { + setText(value); + } } Index: lucene/core/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java =================================================================== --- lucene/core/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java (revision 1381159) +++ lucene/core/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java (working copy) @@ -140,7 +140,7 @@ query = newPhraseQuery("field", "twenty two", true, new AveragePayloadFunction()); QueryUtils.check(query); - + // all 10 hits should have score = 3 because adjacent terms have payloads of 2,4 // and all the similarity factors are set to 1 hits = searcher.search(query, null, 100); @@ -162,8 +162,8 @@ assertEquals("should be 100 hits", 100, hits.totalHits); for (int j = 0; j < hits.scoreDocs.length; j++) { ScoreDoc doc = hits.scoreDocs[j]; - // System.out.println("Doc: " + doc.toString()); - // System.out.println("Explain: " + searcher.explain(query, doc.doc)); + // System.out.println("Doc: " + doc.toString()); + // System.out.println("Explain: " + searcher.explain(query, doc.doc)); assertTrue(doc.score + " does not equal: " + 3, doc.score == 3); } } @@ -192,71 +192,71 @@ } public void testAverageFunction() throws IOException { - PayloadNearQuery query; - TopDocs hits; + PayloadNearQuery query; + TopDocs hits; - query = newPhraseQuery("field", "twenty two", true, new AveragePayloadFunction()); - QueryUtils.check(query); - // all 10 hits should have score = 3 because adjacent terms have payloads of 2,4 - // and all the similarity factors are set to 1 - hits = searcher.search(query, null, 100); - assertTrue("hits is null and 
it shouldn't be", hits != null); - assertTrue("should be 10 hits", hits.totalHits == 10); - for (int j = 0; j < hits.scoreDocs.length; j++) { - ScoreDoc doc = hits.scoreDocs[j]; - assertTrue(doc.score + " does not equal: " + 3, doc.score == 3); - Explanation explain = searcher.explain(query, hits.scoreDocs[j].doc); - String exp = explain.toString(); - assertTrue(exp, exp.indexOf("AveragePayloadFunction") > -1); - assertTrue(hits.scoreDocs[j].score + " explain value does not equal: " + 3, explain.getValue() == 3f); - } + query = newPhraseQuery("field", "twenty two", true, new AveragePayloadFunction()); + QueryUtils.check(query); + // all 10 hits should have score = 3 because adjacent terms have payloads of 2,4 + // and all the similarity factors are set to 1 + hits = searcher.search(query, null, 100); + assertTrue("hits is null and it shouldn't be", hits != null); + assertTrue("should be 10 hits", hits.totalHits == 10); + for (int j = 0; j < hits.scoreDocs.length; j++) { + ScoreDoc doc = hits.scoreDocs[j]; + assertTrue(doc.score + " does not equal: " + 3, doc.score == 3); + Explanation explain = searcher.explain(query, hits.scoreDocs[j].doc); + String exp = explain.toString(); + assertTrue(exp, exp.indexOf("AveragePayloadFunction") > -1); + assertTrue(hits.scoreDocs[j].score + " explain value does not equal: " + 3, explain.getValue() == 3f); + } } public void testMaxFunction() throws IOException { - PayloadNearQuery query; - TopDocs hits; + PayloadNearQuery query; + TopDocs hits; - query = newPhraseQuery("field", "twenty two", true, new MaxPayloadFunction()); - QueryUtils.check(query); - // all 10 hits should have score = 4 (max payload value) - hits = searcher.search(query, null, 100); - assertTrue("hits is null and it shouldn't be", hits != null); - assertTrue("should be 10 hits", hits.totalHits == 10); - for (int j = 0; j < hits.scoreDocs.length; j++) { - ScoreDoc doc = hits.scoreDocs[j]; - assertTrue(doc.score + " does not equal: " + 4, doc.score == 4); - Explanation explain = searcher.explain(query, hits.scoreDocs[j].doc); - String exp = explain.toString(); - assertTrue(exp, exp.indexOf("MaxPayloadFunction") > -1); - assertTrue(hits.scoreDocs[j].score + " explain value does not equal: " + 4, explain.getValue() == 4f); - } + query = newPhraseQuery("field", "twenty two", true, new MaxPayloadFunction()); + QueryUtils.check(query); + // all 10 hits should have score = 4 (max payload value) + hits = searcher.search(query, null, 100); + assertTrue("hits is null and it shouldn't be", hits != null); + assertTrue("should be 10 hits", hits.totalHits == 10); + for (int j = 0; j < hits.scoreDocs.length; j++) { + ScoreDoc doc = hits.scoreDocs[j]; + assertTrue(doc.score + " does not equal: " + 4, doc.score == 4); + Explanation explain = searcher.explain(query, hits.scoreDocs[j].doc); + String exp = explain.toString(); + assertTrue(exp, exp.indexOf("MaxPayloadFunction") > -1); + assertTrue(hits.scoreDocs[j].score + " explain value does not equal: " + 4, explain.getValue() == 4f); + } } public void testMinFunction() throws IOException { - PayloadNearQuery query; - TopDocs hits; + PayloadNearQuery query; + TopDocs hits; - query = newPhraseQuery("field", "twenty two", true, new MinPayloadFunction()); - QueryUtils.check(query); - // all 10 hits should have score = 2 (min payload value) - hits = searcher.search(query, null, 100); - assertTrue("hits is null and it shouldn't be", hits != null); - assertTrue("should be 10 hits", hits.totalHits == 10); - for (int j = 0; j < hits.scoreDocs.length; j++) { - 
ScoreDoc doc = hits.scoreDocs[j]; - assertTrue(doc.score + " does not equal: " + 2, doc.score == 2); - Explanation explain = searcher.explain(query, hits.scoreDocs[j].doc); - String exp = explain.toString(); - assertTrue(exp, exp.indexOf("MinPayloadFunction") > -1); - assertTrue(hits.scoreDocs[j].score + " explain value does not equal: " + 2, explain.getValue() == 2f); - } + query = newPhraseQuery("field", "twenty two", true, new MinPayloadFunction()); + QueryUtils.check(query); + // all 10 hits should have score = 2 (min payload value) + hits = searcher.search(query, null, 100); + assertTrue("hits is null and it shouldn't be", hits != null); + assertTrue("should be 10 hits", hits.totalHits == 10); + for (int j = 0; j < hits.scoreDocs.length; j++) { + ScoreDoc doc = hits.scoreDocs[j]; + assertTrue(doc.score + " does not equal: " + 2, doc.score == 2); + Explanation explain = searcher.explain(query, hits.scoreDocs[j].doc); + String exp = explain.toString(); + assertTrue(exp, exp.indexOf("MinPayloadFunction") > -1); + assertTrue(hits.scoreDocs[j].score + " explain value does not equal: " + 2, explain.getValue() == 2f); + } } private SpanQuery[] getClauses() { - SpanNearQuery q1, q2; - q1 = spanNearQuery("field2", "twenty two"); - q2 = spanNearQuery("field2", "twenty three"); - SpanQuery[] clauses = new SpanQuery[2]; - clauses[0] = q1; - clauses[1] = q2; - return clauses; + SpanNearQuery q1, q2; + q1 = spanNearQuery("field2", "twenty two"); + q2 = spanNearQuery("field2", "twenty three"); + SpanQuery[] clauses = new SpanQuery[2]; + clauses[0] = q1; + clauses[1] = q2; + return clauses; } private SpanNearQuery spanNearQuery(String fieldName, String words) { String[] wordList = words.split("[\\s]+"); @@ -274,8 +274,8 @@ hits = searcher.search(query, null, 100); assertTrue("hits is null and it shouldn't be", hits != null); ScoreDoc doc = hits.scoreDocs[0]; - // System.out.println("Doc: " + doc.toString()); - // System.out.println("Explain: " + searcher.explain(query, doc.doc)); + // System.out.println("Doc: " + doc.toString()); + // System.out.println("Explain: " + searcher.explain(query, doc.doc)); assertTrue("there should only be one hit", hits.totalHits == 1); // should have score = 3 because adjacent terms have payloads of 2,4 assertTrue(doc.score + " does not equal: " + 3, doc.score == 3); @@ -299,8 +299,8 @@ assertTrue("should only be one hit", hits.scoreDocs.length == 1); // the score should be 3 - the average of all the underlying payloads ScoreDoc doc = hits.scoreDocs[0]; - // System.out.println("Doc: " + doc.toString()); - // System.out.println("Explain: " + searcher.explain(query, doc.doc)); + // System.out.println("Doc: " + doc.toString()); + // System.out.println("Explain: " + searcher.explain(query, doc.doc)); assertTrue(doc.score + " does not equal: " + 3, doc.score == 3); } Index: lucene/core/src/test/org/apache/lucene/search/TestDocIdSet.java =================================================================== --- lucene/core/src/test/org/apache/lucene/search/TestDocIdSet.java (revision 1381159) +++ lucene/core/src/test/org/apache/lucene/search/TestDocIdSet.java (working copy) @@ -62,15 +62,15 @@ }; } }; - - + + DocIdSet filteredSet = new FilteredDocIdSet(innerSet){ @Override protected boolean match(int docid) { return docid%2 == 0; //validate only even docids - } + } }; - + DocIdSetIterator iter = filteredSet.iterator(); ArrayList list = new ArrayList(); int doc = iter.advance(3); @@ -80,7 +80,7 @@ list.add(Integer.valueOf(doc)); } } - + int[] docs = new int[list.size()]; int c=0; 
Iterator intIter = list.iterator(); @@ -151,7 +151,7 @@ @Override protected boolean match(int docid) { return true; - } + } }; } }; Index: lucene/core/src/test/org/apache/lucene/search/spans/TestBasics.java =================================================================== --- lucene/core/src/test/org/apache/lucene/search/spans/TestBasics.java (revision 1381159) +++ lucene/core/src/test/org/apache/lucene/search/spans/TestBasics.java (working copy) @@ -582,21 +582,21 @@ @Test public void testSpansSkipTo() throws Exception { - SpanTermQuery t1 = new SpanTermQuery(new Term("field", "seventy")); - SpanTermQuery t2 = new SpanTermQuery(new Term("field", "seventy")); - Spans s1 = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), t1); - Spans s2 = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), t2); - - assertTrue(s1.next()); - assertTrue(s2.next()); - - boolean hasMore = true; - - do { - hasMore = skipToAccoringToJavaDocs(s1, s1.doc()); - assertEquals(hasMore, s2.skipTo(s2.doc())); - assertEquals(s1.doc(), s2.doc()); - } while (hasMore); + SpanTermQuery t1 = new SpanTermQuery(new Term("field", "seventy")); + SpanTermQuery t2 = new SpanTermQuery(new Term("field", "seventy")); + Spans s1 = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), t1); + Spans s2 = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), t2); + + assertTrue(s1.next()); + assertTrue(s2.next()); + + boolean hasMore = true; + + do { + hasMore = skipToAccoringToJavaDocs(s1, s1.doc()); + assertEquals(hasMore, s2.skipTo(s2.doc())); + assertEquals(s1.doc(), s2.doc()); + } while (hasMore); } /** Skips to the first match beyond the current, whose document number is Index: lucene/core/src/test/org/apache/lucene/search/TestCachingCollector.java =================================================================== --- lucene/core/src/test/org/apache/lucene/search/TestCachingCollector.java (revision 1381159) +++ lucene/core/src/test/org/apache/lucene/search/TestCachingCollector.java (working copy) @@ -145,7 +145,7 @@ try { cc.replay(new NoOpCollector(false)); // this call should fail fail("should have failed if an in-order Collector was given to replay(), " + - "while CachingCollector was initialized with out-of-order collection"); + "while CachingCollector was initialized with out-of-order collection"); } catch (IllegalArgumentException e) { // ok } Index: lucene/core/src/test/org/apache/lucene/search/TestFuzzyQuery.java =================================================================== --- lucene/core/src/test/org/apache/lucene/search/TestFuzzyQuery.java (revision 1381159) +++ lucene/core/src/test/org/apache/lucene/search/TestFuzzyQuery.java (working copy) @@ -101,7 +101,7 @@ } // not similar enough: - query = new FuzzyQuery(new Term("field", "xxxxx"), FuzzyQuery.defaultMaxEdits, 0); + query = new FuzzyQuery(new Term("field", "xxxxx"), FuzzyQuery.defaultMaxEdits, 0); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(0, hits.length); query = new FuzzyQuery(new Term("field", "aaccc"), FuzzyQuery.defaultMaxEdits, 0); // edit distance to "aaaaa" = 3 Index: lucene/core/src/test/org/apache/lucene/index/TestTransactions.java =================================================================== --- lucene/core/src/test/org/apache/lucene/index/TestTransactions.java (revision 1381159) +++ lucene/core/src/test/org/apache/lucene/index/TestTransactions.java (working copy) @@ -129,7 +129,7 @@ } try { writer2.prepareCommit(); - } catch (Throwable t) { + } catch (Throwable t) { writer1.rollback(); writer2.rollback(); 
return; Index: lucene/core/src/test/org/apache/lucene/index/TestTransactionRollback.java =================================================================== --- lucene/core/src/test/org/apache/lucene/index/TestTransactionRollback.java (revision 1381159) +++ lucene/core/src/test/org/apache/lucene/index/TestTransactionRollback.java (working copy) @@ -41,13 +41,13 @@ */ public class TestTransactionRollback extends LuceneTestCase { - + private static final String FIELD_RECORD_ID = "record_id"; private Directory dir; - + //Rolls back index to a chosen ID private void rollBackLast(int id) throws Exception { - + // System.out.println("Attempting to rollback to "+id); String ids="-"+id; IndexCommit last=null; @@ -62,7 +62,7 @@ if (last==null) throw new RuntimeException("Couldn't find commit point "+id); - + IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setIndexDeletionPolicy( new RollbackDeletionPolicy(id)).setIndexCommit(last)); @@ -72,22 +72,22 @@ w.close(); } - public void testRepeatedRollBacks() throws Exception { + public void testRepeatedRollBacks() throws Exception { int expectedLastRecordId=100; while (expectedLastRecordId>10) { - expectedLastRecordId -=10; + expectedLastRecordId -=10; rollBackLast(expectedLastRecordId); BitSet expecteds = new BitSet(100); expecteds.set(1,(expectedLastRecordId+1),true); - checkExpecteds(expecteds); + checkExpecteds(expecteds); } } - + private void checkExpecteds(BitSet expecteds) throws Exception { IndexReader r = DirectoryReader.open(dir); - + //Perhaps not the most efficient approach but meets our //needs here. final Bits liveDocs = MultiFields.getLiveDocs(r); @@ -114,7 +114,7 @@ Collection files = comm.getFileNames(); for (Iterator iterator2 = files.iterator(); iterator2.hasNext();) { String filename = (String) iterator2.next(); - System.out.print(filename+", "); + System.out.print(filename+", "); } System.out.println(); } @@ -133,7 +133,7 @@ Document doc=new Document(); doc.add(newTextField(FIELD_RECORD_ID, ""+currentRecordId, Field.Store.YES)); w.addDocument(doc); - + if (currentRecordId%10 == 0) { Map data = new HashMap(); data.put("index", "records 1-"+currentRecordId); @@ -177,16 +177,16 @@ " UserData="+commit.getUserData() +") ("+(commits.size()-1)+" commit points left) files="); Collection files = commit.getFileNames(); for (Iterator iterator2 = files.iterator(); iterator2.hasNext();) { - System.out.print(" "+iterator2.next()); + System.out.print(" "+iterator2.next()); } System.out.println(); */ - - commit.delete(); + + commit.delete(); } } } - } + } } class DeleteLastCommitPolicy implements IndexDeletionPolicy { @@ -198,7 +198,7 @@ } } - public void testRollbackDeletionPolicy() throws Exception { + public void testRollbackDeletionPolicy() throws Exception { for(int i=0;i<2;i++) { // Unless you specify a prior commit point, rollback // should not work: @@ -209,7 +209,7 @@ r.close(); } } - + // Keeps all commit points (used to build index) class KeepAllDeletionPolicy implements IndexDeletionPolicy { public void onCommit(List commits) throws IOException {} Index: lucene/core/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java =================================================================== --- lucene/core/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java (revision 1381159) +++ lucene/core/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java (working copy) @@ -78,7 +78,7 @@ offsetAtt.setOffset(starts[nextTokenIndex], ends[nextTokenIndex]); 
posIncAtt.setPositionIncrement(incs[nextTokenIndex]); nextTokenIndex++; - return true; + return true; } else { return false; } Index: lucene/core/src/test/org/apache/lucene/index/TestLongPostings.java =================================================================== --- lucene/core/src/test/org/apache/lucene/index/TestLongPostings.java (revision 1381159) +++ lucene/core/src/test/org/apache/lucene/index/TestLongPostings.java (working copy) @@ -111,27 +111,27 @@ } final IndexReader r; - final IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) - .setOpenMode(IndexWriterConfig.OpenMode.CREATE) - .setMergePolicy(newLogMergePolicy()); - iwc.setRAMBufferSizeMB(16.0 + 16.0 * random().nextDouble()); - iwc.setMaxBufferedDocs(-1); - final RandomIndexWriter riw = new RandomIndexWriter(random(), dir, iwc); - - for(int idx=0;idx= 0) && (! a.get(aa))) { - aa--; + aa--; } if (b.length() == 0) { bb = -1; Index: lucene/core/src/test/org/apache/lucene/util/TestBitUtil.java =================================================================== --- lucene/core/src/test/org/apache/lucene/util/TestBitUtil.java (revision 1381159) +++ lucene/core/src/test/org/apache/lucene/util/TestBitUtil.java (working copy) @@ -71,12 +71,12 @@ long sumRes = 0; while (iters-- >= 0) { for (int i = 1; i <= 63; i++) { - long a = testArg(i); - sumRes += BitUtil.nlz(a); - sumRes += BitUtil.nlz(a+1); - sumRes += BitUtil.nlz(a-1); - sumRes += BitUtil.nlz(a+10); - sumRes += BitUtil.nlz(a-10); + long a = testArg(i); + sumRes += BitUtil.nlz(a); + sumRes += BitUtil.nlz(a + 1); + sumRes += BitUtil.nlz(a - 1); + sumRes += BitUtil.nlz(a + 10); + sumRes += BitUtil.nlz(a - 10); } } return sumRes; @@ -86,12 +86,12 @@ long sumRes = 0; while (iters-- >= 0) { for (int i = 1; i <= 63; i++) { - long a = testArg(i); - sumRes += Long.numberOfLeadingZeros(a); - sumRes += Long.numberOfLeadingZeros(a+1); - sumRes += Long.numberOfLeadingZeros(a-1); - sumRes += Long.numberOfLeadingZeros(a+10); - sumRes += Long.numberOfLeadingZeros(a-10); + long a = testArg(i); + sumRes += Long.numberOfLeadingZeros(a); + sumRes += Long.numberOfLeadingZeros(a + 1); + sumRes += Long.numberOfLeadingZeros(a - 1); + sumRes += Long.numberOfLeadingZeros(a + 10); + sumRes += Long.numberOfLeadingZeros(a - 10); } } return sumRes; Index: lucene/core/src/test/org/apache/lucene/util/TestOpenBitSet.java =================================================================== --- lucene/core/src/test/org/apache/lucene/util/TestOpenBitSet.java (revision 1381159) +++ lucene/core/src/test/org/apache/lucene/util/TestOpenBitSet.java (working copy) @@ -71,7 +71,7 @@ // aa = a.prevSetBit(aa-1); aa--; while ((aa >= 0) && (! a.get(aa))) { - aa--; + aa--; } bb = b.prevSetBit(bb-1); assertEquals(aa,bb); @@ -85,7 +85,7 @@ // aa = a.prevSetBit(aa-1); aa--; while ((aa >= 0) && (! 
a.get(aa))) { - aa--; + aa--; } bb = (int) b.prevSetBit((long) (bb-1)); assertEquals(aa,bb); Index: lucene/core/src/java/org/apache/lucene/codecs/lucene40/BitVector.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/lucene40/BitVector.java (revision 1381159) +++ lucene/core/src/java/org/apache/lucene/codecs/lucene40/BitVector.java (working copy) @@ -163,7 +163,7 @@ int c = 0; int end = bits.length; for (int i = 0; i < end; i++) { - c += BYTE_COUNTS[bits[i] & 0xFF]; // sum bits per byte + c += BYTE_COUNTS[bits[i] & 0xFF]; // sum bits per byte } count = c; } @@ -176,12 +176,12 @@ int c = 0; int end = bits.length; for (int i = 0; i < end; i++) { - c += BYTE_COUNTS[bits[i] & 0xFF]; // sum bits per byte + c += BYTE_COUNTS[bits[i] & 0xFF]; // sum bits per byte } return c; } - private static final byte[] BYTE_COUNTS = { // table of bits/byte + private static final byte[] BYTE_COUNTS = { // table of bits/byte 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, Index: lucene/core/src/java/org/apache/lucene/codecs/lucene3x/TermBuffer.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/lucene3x/TermBuffer.java (revision 1381159) +++ lucene/core/src/java/org/apache/lucene/codecs/lucene3x/TermBuffer.java (working copy) @@ -46,7 +46,7 @@ int newSuffixStart; // only valid right after .read is called public int compareTo(TermBuffer other) { - if (field == other.field) // fields are interned + if (field == other.field) // fields are interned // (only by PreFlex codec) return utf8AsUTF16Comparator.compare(bytes, other.bytes); else Index: lucene/core/src/java/org/apache/lucene/codecs/lucene3x/SegmentTermEnum.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/lucene3x/SegmentTermEnum.java (revision 1381159) +++ lucene/core/src/java/org/apache/lucene/codecs/lucene3x/SegmentTermEnum.java (working copy) @@ -141,15 +141,15 @@ termBuffer.read(input, fieldInfos); newSuffixStart = termBuffer.newSuffixStart; - termInfo.docFreq = input.readVInt(); // read doc freq - termInfo.freqPointer += input.readVLong(); // read freq pointer - termInfo.proxPointer += input.readVLong(); // read prox pointer + termInfo.docFreq = input.readVInt(); // read doc freq + termInfo.freqPointer += input.readVLong(); // read freq pointer + termInfo.proxPointer += input.readVLong(); // read prox pointer if (termInfo.docFreq >= skipInterval) termInfo.skipOffset = input.readVInt(); if (isIndex) - indexPointer += input.readVLong(); // read index pointer + indexPointer += input.readVLong(); // read index pointer //System.out.println(" ste ret term=" + term()); return true; Index: lucene/core/src/java/org/apache/lucene/search/FieldCache.java =================================================================== --- lucene/core/src/java/org/apache/lucene/search/FieldCache.java (revision 1381159) +++ lucene/core/src/java/org/apache/lucene/search/FieldCache.java (working copy) @@ -508,7 +508,7 @@ // this special case is the reason that Arrays.binarySearch() isn't useful. 
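// A minimal sketch of the lookup this special case guards, assuming ord 0 is
// reserved for documents with no value for the field: a null key maps straight
// to ord 0, and the binary search covers ords 1..numOrd()-1 only, which is why
// a plain java.util.Arrays.binarySearch() over the full range is not usable.
// numOrd() appears in the hunk below; compareAtOrd() is a hypothetical stand-in
// for comparing the term stored at an ord against the key.
int binarySearchLookup(BytesRef key) {
  if (key == null) {
    return 0; // the reserved "no value" ord
  }
  int low = 1; // skip the reserved ord 0
  int high = numOrd() - 1;
  while (low <= high) {
    int mid = (low + high) >>> 1;
    int cmp = compareAtOrd(mid, key); // term at ord `mid` vs. the key
    if (cmp < 0) {
      low = mid + 1;
    } else if (cmp > 0) {
      high = mid - 1;
    } else {
      return mid; // found
    }
  }
  return -(low + 1); // not found: encoded insertion point
}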
if (key == null) return 0; - + int low = 1; int high = numOrd()-1; Index: lucene/core/src/java/org/apache/lucene/search/payloads/PayloadFunction.java =================================================================== --- lucene/core/src/java/org/apache/lucene/search/payloads/PayloadFunction.java (revision 1381159) +++ lucene/core/src/java/org/apache/lucene/search/payloads/PayloadFunction.java (working copy) @@ -56,10 +56,10 @@ public abstract float docScore(int docId, String field, int numPayloadsSeen, float payloadScore); public Explanation explain(int docId, String field, int numPayloadsSeen, float payloadScore){ - Explanation result = new Explanation(); - result.setDescription(getClass().getSimpleName() + ".docScore()"); - result.setValue(docScore(docId, field, numPayloadsSeen, payloadScore)); - return result; + Explanation result = new Explanation(); + result.setDescription(getClass().getSimpleName() + ".docScore()"); + result.setValue(docScore(docId, field, numPayloadsSeen, payloadScore)); + return result; }; @Override Index: lucene/core/src/java/org/apache/lucene/search/payloads/MinPayloadFunction.java =================================================================== --- lucene/core/src/java/org/apache/lucene/search/payloads/MinPayloadFunction.java (revision 1381159) +++ lucene/core/src/java/org/apache/lucene/search/payloads/MinPayloadFunction.java (working copy) @@ -24,12 +24,12 @@ public class MinPayloadFunction extends PayloadFunction { @Override - public float currentScore(int docId, String field, int start, int end, int numPayloadsSeen, float currentScore, float currentPayloadScore) { + public float currentScore(int docId, String field, int start, int end, int numPayloadsSeen, float currentScore, float currentPayloadScore) { if (numPayloadsSeen == 0) { return currentPayloadScore; } else { - return Math.min(currentPayloadScore, currentScore); - } + return Math.min(currentPayloadScore, currentScore); + } } @Override Index: lucene/core/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java =================================================================== --- lucene/core/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java (revision 1381159) +++ lucene/core/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java (working copy) @@ -257,7 +257,7 @@ getPayloads(spansArr); more = spans.next(); } while (more && (doc == spans.doc())); - return true; + return true; } @Override Index: lucene/core/src/java/org/apache/lucene/search/spans/NearSpansOrdered.java =================================================================== --- lucene/core/src/java/org/apache/lucene/search/spans/NearSpansOrdered.java (revision 1381159) +++ lucene/core/src/java/org/apache/lucene/search/spans/NearSpansOrdered.java (working copy) @@ -117,7 +117,7 @@ public int end() { return matchEnd; } public Spans[] getSubSpans() { - return subSpans; + return subSpans; } // TODO: Remove warning after API has been finalized Index: lucene/core/src/java/org/apache/lucene/search/spans/NearSpansUnordered.java =================================================================== --- lucene/core/src/java/org/apache/lucene/search/spans/NearSpansUnordered.java (revision 1381159) +++ lucene/core/src/java/org/apache/lucene/search/spans/NearSpansUnordered.java (working copy) @@ -151,7 +151,7 @@ } } public Spans[] getSubSpans() { - return subSpans; + return subSpans; } @Override public boolean next() throws IOException { @@ -286,7 +286,7 @@ } private void addToList(SpansCell cell) { - if (last != null) { 
// add next to end of list + if (last != null) { // add next to end of list last.next = cell; } else first = cell; @@ -295,7 +295,7 @@ } private void firstToLast() { - last.next = first; // move first to end of list + last.next = first; // move first to end of list last = first; first = first.next; last.next = null; Index: lucene/core/src/java/org/apache/lucene/search/spans/SpanPositionCheckQuery.java =================================================================== --- lucene/core/src/java/org/apache/lucene/search/spans/SpanPositionCheckQuery.java (revision 1381159) +++ lucene/core/src/java/org/apache/lucene/search/spans/SpanPositionCheckQuery.java (working copy) @@ -57,7 +57,7 @@ @Override public void extractTerms(Set terms) { - match.extractTerms(terms); + match.extractTerms(terms); } /** @@ -186,4 +186,4 @@ } } -} \ No newline at end of file +} Index: lucene/core/src/java/org/apache/lucene/search/spans/Spans.java =================================================================== --- lucene/core/src/java/org/apache/lucene/search/spans/Spans.java (revision 1381159) +++ lucene/core/src/java/org/apache/lucene/search/spans/Spans.java (working copy) @@ -34,7 +34,7 @@ * boolean skipTo(int target) { * do { * if (!next()) - * return false; + * return false; * } while (target > doc()); * return true; * } Index: lucene/core/src/java/org/apache/lucene/search/spans/SpanNearQuery.java =================================================================== --- lucene/core/src/java/org/apache/lucene/search/spans/SpanNearQuery.java (revision 1381159) +++ lucene/core/src/java/org/apache/lucene/search/spans/SpanNearQuery.java (working copy) @@ -92,9 +92,9 @@ @Override public void extractTerms(Set terms) { - for (final SpanQuery clause : clauses) { - clause.extractTerms(terms); - } + for (final SpanQuery clause : clauses) { + clause.extractTerms(terms); + } } Index: lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java =================================================================== --- lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java (revision 1381159) +++ lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java (working copy) @@ -110,7 +110,7 @@ * Do not modify the List or its contents. */ public List getTermArrays() { - return Collections.unmodifiableList(termArrays); + return Collections.unmodifiableList(termArrays); } /** Index: lucene/core/src/java/org/apache/lucene/search/FilteredDocIdSetIterator.java =================================================================== --- lucene/core/src/java/org/apache/lucene/search/FilteredDocIdSetIterator.java (revision 1381159) +++ lucene/core/src/java/org/apache/lucene/search/FilteredDocIdSetIterator.java (working copy) @@ -28,7 +28,7 @@ public abstract class FilteredDocIdSetIterator extends DocIdSetIterator { protected DocIdSetIterator _innerIter; private int doc; - + /** * Constructor. * @param innerIter Underlying DocIdSetIterator. @@ -40,7 +40,7 @@ _innerIter = innerIter; doc = -1; } - + /** * Validation method to determine whether a docid should be in the result set. 
* @param doc docid to be tested @@ -48,7 +48,7 @@ * @see #FilteredDocIdSetIterator(DocIdSetIterator) */ protected abstract boolean match(int doc); - + @Override public int docID() { return doc; Index: lucene/core/src/java/org/apache/lucene/search/PhrasePositions.java =================================================================== --- lucene/core/src/java/org/apache/lucene/search/PhrasePositions.java (revision 1381159) +++ lucene/core/src/java/org/apache/lucene/search/PhrasePositions.java (working copy) @@ -24,13 +24,13 @@ * Position of a term in a document that takes into account the term offset within the phrase. */ final class PhrasePositions { - int doc; // current doc - int position; // position in doc - int count; // remaining pos in this doc - int offset; // position in phrase + int doc; // current doc + int position; // position in doc + int count; // remaining pos in this doc + int offset; // position in phrase final int ord; // unique across all PhrasePositions instances - final DocsAndPositionsEnum postings; // stream of docs & positions - PhrasePositions next; // used to make lists + final DocsAndPositionsEnum postings; // stream of docs & positions + PhrasePositions next; // used to make lists int rptGroup = -1; // >=0 indicates that this is a repeating PP int rptInd; // index in the rptGroup final Term[] terms; // for repetitions initialization @@ -42,7 +42,7 @@ this.terms = terms; } - final boolean next() throws IOException { // increments to next doc + final boolean next() throws IOException { // increments to next doc doc = postings.nextDoc(); if (doc == DocIdSetIterator.NO_MORE_DOCS) { return false; @@ -59,7 +59,7 @@ } final void firstPosition() throws IOException { - count = postings.freq(); // read first pos + count = postings.freq(); // read first pos nextPosition(); } @@ -70,7 +70,7 @@ * have exactly the same position. 
*/ final boolean nextPosition() throws IOException { - if (count-- > 0) { // read subsequent pos's + if (count-- > 0) { // read subsequent pos's position = postings.nextPosition() - offset; return true; } else Index: lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java =================================================================== --- lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java (revision 1381159) +++ lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java (working copy) @@ -397,7 +397,7 @@ public Query rewrite(IndexReader reader) throws IOException { if (minNrShouldMatch == 0 && clauses.size() == 1) { // optimize 1-clause queries BooleanClause c = clauses.get(0); - if (!c.isProhibited()) { // just return clause + if (!c.isProhibited()) { // just return clause Query query = c.getQuery().rewrite(reader); // rewrite first @@ -468,7 +468,7 @@ Query subQuery = c.getQuery(); if (subQuery != null) { - if (subQuery instanceof BooleanQuery) { // wrap sub-bools in parens + if (subQuery instanceof BooleanQuery) { // wrap sub-bools in parens buffer.append("("); buffer.append(subQuery.toString(field)); buffer.append(")"); Index: lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java =================================================================== --- lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java (revision 1381159) +++ lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java (working copy) @@ -281,7 +281,7 @@ ArrayUtil.mergeSort(postingsFreqs); } - if (slop == 0) { // optimize exact case + if (slop == 0) { // optimize exact case ExactPhraseScorer s = new ExactPhraseScorer(this, postingsFreqs, similarity.exactSimScorer(stats, context)); if (s.noDocs) { return null; Index: lucene/core/src/java/org/apache/lucene/search/FieldCacheImpl.java =================================================================== --- lucene/core/src/java/org/apache/lucene/search/FieldCacheImpl.java (revision 1381159) +++ lucene/core/src/java/org/apache/lucene/search/FieldCacheImpl.java (working copy) @@ -52,7 +52,7 @@ * @since lucene 1.4 */ class FieldCacheImpl implements FieldCache { - + private Map,Cache> caches; FieldCacheImpl() { init(); @@ -173,7 +173,7 @@ ((AtomicReader)key).addReaderClosedListener(purgeReader); } else { // last chance - reader.addReaderClosedListener(purgeReader); + reader.addReaderClosedListener(purgeReader); } } } Index: lucene/core/src/java/org/apache/lucene/index/SegmentInfo.java =================================================================== --- lucene/core/src/java/org/apache/lucene/index/SegmentInfo.java (revision 1381159) +++ lucene/core/src/java/org/apache/lucene/index/SegmentInfo.java (working copy) @@ -43,9 +43,9 @@ public static final int NO = -1; // e.g. no norms; no deletes; public static final int YES = 1; // e.g. 
have norms; have deletes; - public final String name; // unique name in dir - private int docCount; // number of docs in seg - public final Directory dir; // where segment resides + public final String name; // unique name in dir + private int docCount; // number of docs in seg + public final Directory dir; // where segment resides private boolean isCompoundFile; Index: lucene/core/src/java/org/apache/lucene/index/CheckIndex.java =================================================================== --- lucene/core/src/java/org/apache/lucene/index/CheckIndex.java (revision 1381159) +++ lucene/core/src/java/org/apache/lucene/index/CheckIndex.java (working copy) @@ -1689,7 +1689,7 @@ " times, to check more than one segment, eg '-segment _2 -segment _a'.\n" + " You can't use this with the -fix option\n" + " -dir-impl X: use a specific " + FSDirectory.class.getSimpleName() + " implementation. " + - "If no package is specified the " + FSDirectory.class.getPackage().getName() + " package will be used.\n" + + "If no package is specified the " + FSDirectory.class.getPackage().getName() + " package will be used.\n" + "\n" + "**WARNING**: -fix should only be used on an emergency basis as it will cause\n" + "documents (perhaps many) to be permanently removed from the index. Always make\n" + Index: lucene/core/src/java/org/apache/lucene/index/DocumentsWriterFlushControl.java =================================================================== --- lucene/core/src/java/org/apache/lucene/index/DocumentsWriterFlushControl.java (revision 1381159) +++ lucene/core/src/java/org/apache/lucene/index/DocumentsWriterFlushControl.java (working copy) @@ -396,11 +396,11 @@ return flushingWriters.size(); } - public boolean doApplyAllDeletes() { + public boolean doApplyAllDeletes() { return flushDeletes.getAndSet(false); } - public void setApplyAllDeletes() { + public void setApplyAllDeletes() { flushDeletes.set(true); } Index: lucene/core/src/java/org/apache/lucene/index/IndexWriter.java =================================================================== --- lucene/core/src/java/org/apache/lucene/index/IndexWriter.java (revision 1381159) +++ lucene/core/src/java/org/apache/lucene/index/IndexWriter.java (working copy) @@ -2894,7 +2894,7 @@ final boolean anySegmentFlushed; synchronized (fullFlushLock) { - boolean flushSuccess = false; + boolean flushSuccess = false; try { anySegmentFlushed = docWriter.flushAllThreads(); flushSuccess = true; Index: lucene/core/src/java/org/apache/lucene/index/IndexFileDeleter.java =================================================================== --- lucene/core/src/java/org/apache/lucene/index/IndexFileDeleter.java (revision 1381159) +++ lucene/core/src/java/org/apache/lucene/index/IndexFileDeleter.java (working copy) @@ -571,7 +571,7 @@ infoStream.message("IFD", "delete \"" + fileName + "\""); } directory.deleteFile(fileName); - } catch (IOException e) { // if delete fails + } catch (IOException e) { // if delete fails if (directory.fileExists(fileName)) { // Some operating systems (e.g. 
Windows) don't Index: lucene/core/src/java/org/apache/lucene/store/Lock.java =================================================================== --- lucene/core/src/java/org/apache/lucene/store/Lock.java (revision 1381159) +++ lucene/core/src/java/org/apache/lucene/store/Lock.java (working copy) @@ -135,7 +135,7 @@ return doBody(); } finally { if (locked) - lock.release(); + lock.release(); } } } Index: lucene/core/src/java/org/apache/lucene/store/BufferedIndexInput.java =================================================================== --- lucene/core/src/java/org/apache/lucene/store/BufferedIndexInput.java (revision 1381159) +++ lucene/core/src/java/org/apache/lucene/store/BufferedIndexInput.java (working copy) @@ -41,9 +41,9 @@ protected byte[] buffer; - private long bufferStart = 0; // position in file of buffer - private int bufferLength = 0; // end of valid bytes - private int bufferPosition = 0; // next byte to read + private long bufferStart = 0; // position in file of buffer + private int bufferLength = 0; // end of valid bytes + private int bufferPosition = 0; // next byte to read @Override public final byte readByte() throws IOException { @@ -259,7 +259,7 @@ private void refill() throws IOException { long start = bufferStart + bufferPosition; long end = start + bufferSize; - if (end > length()) // don't read past EOF + if (end > length()) // don't read past EOF end = length(); int newLength = (int)(end - start); if (newLength <= 0) @@ -294,7 +294,7 @@ else { bufferStart = pos; bufferPosition = 0; - bufferLength = 0; // trigger refill() on read() + bufferLength = 0; // trigger refill() on read() seekInternal(pos); } } Index: lucene/core/src/java/org/apache/lucene/util/Constants.java =================================================================== --- lucene/core/src/java/org/apache/lucene/util/Constants.java (revision 1381159) +++ lucene/core/src/java/org/apache/lucene/util/Constants.java (working copy) @@ -26,7 +26,7 @@ **/ public final class Constants { - private Constants() {} // can't construct + private Constants() {} // can't construct /** JVM vendor info. */ public static final String JVM_VENDOR = System.getProperty("java.vm.vendor"); Index: lucene/core/src/java/org/apache/lucene/util/PriorityQueue.java =================================================================== --- lucene/core/src/java/org/apache/lucene/util/PriorityQueue.java (revision 1381159) +++ lucene/core/src/java/org/apache/lucene/util/PriorityQueue.java (working copy) @@ -177,11 +177,11 @@ time. 
*/ public final T pop() { if (size > 0) { - T result = heap[1]; // save first value - heap[1] = heap[size]; // move last to first - heap[size] = null; // permit GC of objects + T result = heap[1]; // save first value + heap[1] = heap[size]; // move last to first + heap[size] = null; // permit GC of objects size--; - downHeap(); // adjust heap + downHeap(); // adjust heap return result; } else return null; @@ -226,26 +226,26 @@ private final void upHeap() { int i = size; - T node = heap[i]; // save bottom node + T node = heap[i]; // save bottom node int j = i >>> 1; while (j > 0 && lessThan(node, heap[j])) { - heap[i] = heap[j]; // shift parents down + heap[i] = heap[j]; // shift parents down i = j; j = j >>> 1; } - heap[i] = node; // install saved node + heap[i] = node; // install saved node } private final void downHeap() { int i = 1; - T node = heap[i]; // save top node - int j = i << 1; // find smaller child + T node = heap[i]; // save top node + int j = i << 1; // find smaller child int k = j + 1; if (k <= size && lessThan(heap[k], heap[j])) { j = k; } while (j <= size && lessThan(heap[j], node)) { - heap[i] = heap[j]; // shift up child + heap[i] = heap[j]; // shift up child i = j; j = i << 1; k = j + 1; @@ -253,7 +253,7 @@ j = k; } } - heap[i] = node; // install saved node + heap[i] = node; // install saved node } /** This method returns the internal heap array as Object[]. Index: lucene/core/src/java/org/apache/lucene/document/Field.java =================================================================== --- lucene/core/src/java/org/apache/lucene/document/Field.java (revision 1381159) +++ lucene/core/src/java/org/apache/lucene/document/Field.java (working copy) @@ -691,7 +691,7 @@ @Override public boolean isAnalyzed() { return true; } @Override - public boolean omitNorms() { return false; } + public boolean omitNorms() { return false; } }, /** Index the field's value without using an Analyzer, so it can be searched. @@ -704,7 +704,7 @@ @Override public boolean isAnalyzed() { return false; } @Override - public boolean omitNorms() { return false; } + public boolean omitNorms() { return false; } }, /** Expert: Index the field's value without an Analyzer, @@ -727,7 +727,7 @@ @Override public boolean isAnalyzed() { return false; } @Override - public boolean omitNorms() { return true; } + public boolean omitNorms() { return true; } }, /** Expert: Index the tokens produced by running the @@ -741,7 +741,7 @@ @Override public boolean isAnalyzed() { return true; } @Override - public boolean omitNorms() { return true; } + public boolean omitNorms() { return true; } }; /** Get the best representation of the index given the flags. */ @@ -774,7 +774,7 @@ public abstract boolean isIndexed(); public abstract boolean isAnalyzed(); - public abstract boolean omitNorms(); + public abstract boolean omitNorms(); } /** Specifies whether and how a field should have term vectors. 
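The PriorityQueue hunk above reindents the classic 1-indexed binary heap: pop() saves heap[1], moves the last element to the root, nulls the vacated slot so the object can be garbage-collected, and sifts down, while upHeap()/downHeap() shift entries along the parent or child chain and install the saved node once at the end. A minimal standalone sketch of the same technique, using a primitive int min-heap with a hard-coded less-than rather than Lucene's generic lessThan():

// Minimal 1-indexed int min-heap mirroring the pop()/upHeap()/downHeap() pattern above.
final class MinHeapSketch {
  private final int[] heap; // slot 0 unused; children of i live at 2*i and 2*i+1
  private int size;

  MinHeapSketch(int maxSize) {
    heap = new int[maxSize + 1];
  }

  void add(int value) { // caller must not exceed maxSize
    heap[++size] = value; // place at the bottom
    upHeap(); // sift the new value up
  }

  int pop() { // caller must check size > 0 first
    int result = heap[1]; // save first value
    heap[1] = heap[size]; // move last to first
    size--;
    downHeap(); // adjust heap
    return result;
  }

  private void upHeap() {
    int i = size;
    int node = heap[i]; // save bottom node
    int j = i >>> 1;
    while (j > 0 && node < heap[j]) {
      heap[i] = heap[j]; // shift parents down
      i = j;
      j = j >>> 1;
    }
    heap[i] = node; // install saved node
  }

  private void downHeap() {
    int i = 1;
    int node = heap[i]; // save top node
    int j = i << 1; // find smaller child
    if (j + 1 <= size && heap[j + 1] < heap[j]) {
      j = j + 1;
    }
    while (j <= size && heap[j] < node) {
      heap[i] = heap[j]; // shift up child
      i = j;
      j = i << 1;
      if (j + 1 <= size && heap[j + 1] < heap[j]) {
        j = j + 1;
      }
    }
    heap[i] = node; // install saved node
  }
}

Keeping the array 1-indexed makes the parent/child arithmetic (j >>> 1 and i << 1) uniform for every node, which is why slot 0 is deliberately left unused.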
Index: lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/utils/StreamUtilsTest.java =================================================================== --- lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/utils/StreamUtilsTest.java (revision 1381159) +++ lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/utils/StreamUtilsTest.java (working copy) @@ -57,38 +57,38 @@ @Test public void testGetInputStreamBzip2() throws Exception { - assertReadText(rawBzip2File("bz2")); - assertReadText(rawBzip2File("bzip")); - assertReadText(rawBzip2File("BZ2")); - assertReadText(rawBzip2File("BZIP")); + assertReadText(rawBzip2File("bz2")); + assertReadText(rawBzip2File("bzip")); + assertReadText(rawBzip2File("BZ2")); + assertReadText(rawBzip2File("BZIP")); } @Test public void testGetOutputStreamBzip2() throws Exception { - assertReadText(autoOutFile("bz2")); - assertReadText(autoOutFile("bzip")); - assertReadText(autoOutFile("BZ2")); - assertReadText(autoOutFile("BZIP")); + assertReadText(autoOutFile("bz2")); + assertReadText(autoOutFile("bzip")); + assertReadText(autoOutFile("BZ2")); + assertReadText(autoOutFile("BZIP")); } @Test public void testGetOutputStreamGzip() throws Exception { - assertReadText(autoOutFile("gz")); - assertReadText(autoOutFile("gzip")); - assertReadText(autoOutFile("GZ")); - assertReadText(autoOutFile("GZIP")); + assertReadText(autoOutFile("gz")); + assertReadText(autoOutFile("gzip")); + assertReadText(autoOutFile("GZ")); + assertReadText(autoOutFile("GZIP")); } @Test public void testGetOutputStreamPlain() throws Exception { - assertReadText(autoOutFile("txt")); - assertReadText(autoOutFile("text")); - assertReadText(autoOutFile("TXT")); - assertReadText(autoOutFile("TEXT")); + assertReadText(autoOutFile("txt")); + assertReadText(autoOutFile("text")); + assertReadText(autoOutFile("TXT")); + assertReadText(autoOutFile("TEXT")); } private File rawTextFile(String ext) throws Exception { - File f = new File(testDir,"testfile." + ext); + File f = new File(testDir,"testfile." + ext); BufferedWriter w = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(f), IOUtils.CHARSET_UTF_8)); w.write(TEXT); w.newLine(); @@ -97,32 +97,32 @@ } private File rawGzipFile(String ext) throws Exception { - File f = new File(testDir,"testfile." + ext); + File f = new File(testDir,"testfile." + ext); OutputStream os = new CompressorStreamFactory().createCompressorOutputStream(CompressorStreamFactory.GZIP, new FileOutputStream(f)); writeText(os); return f; } private File rawBzip2File(String ext) throws Exception { - File f = new File(testDir,"testfile." + ext); - OutputStream os = new CompressorStreamFactory().createCompressorOutputStream(CompressorStreamFactory.BZIP2, new FileOutputStream(f)); - writeText(os); - return f; + File f = new File(testDir,"testfile." + ext); + OutputStream os = new CompressorStreamFactory().createCompressorOutputStream(CompressorStreamFactory.BZIP2, new FileOutputStream(f)); + writeText(os); + return f; } private File autoOutFile(String ext) throws Exception { - File f = new File(testDir,"testfile." + ext); - OutputStream os = StreamUtils.outputStream(f); - writeText(os); - return f; + File f = new File(testDir,"testfile." 
+ ext); + OutputStream os = StreamUtils.outputStream(f); + writeText(os); + return f; } - private void writeText(OutputStream os) throws IOException { - BufferedWriter w = new BufferedWriter(new OutputStreamWriter(os, IOUtils.CHARSET_UTF_8)); - w.write(TEXT); - w.newLine(); - w.close(); - } + private void writeText(OutputStream os) throws IOException { + BufferedWriter w = new BufferedWriter(new OutputStreamWriter(os, IOUtils.CHARSET_UTF_8)); + w.write(TEXT); + w.newLine(); + w.close(); + } private void assertReadText(File f) throws Exception { InputStream ir = StreamUtils.inputStream(f); Index: lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTaskTest.java =================================================================== --- lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTaskTest.java (revision 1381159) +++ lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTaskTest.java (working copy) @@ -157,16 +157,16 @@ String expDate, String expBody) throws Exception { InputStream in = new FileInputStream(file); switch(fileType) { - case BZIP2: - in = csFactory.createCompressorInputStream(CompressorStreamFactory.BZIP2, in); - break; - case GZIP: - in = csFactory.createCompressorInputStream(CompressorStreamFactory.GZIP, in); - break; - case PLAIN: - break; // nothing to do - default: - assertFalse("Unknown file type!",true); //fail, should not happen + case BZIP2: + in = csFactory.createCompressorInputStream(CompressorStreamFactory.BZIP2, in); + break; + case GZIP: + in = csFactory.createCompressorInputStream(CompressorStreamFactory.GZIP, in); + break; + case PLAIN: + break; // nothing to do + default: + assertFalse("Unknown file type!",true); //fail, should not happen } BufferedReader br = new BufferedReader(new InputStreamReader(in, "utf-8")); try { Index: lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/utils/StreamUtils.java =================================================================== --- lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/utils/StreamUtils.java (revision 1381159) +++ lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/utils/StreamUtils.java (working copy) @@ -37,43 +37,43 @@ */ public class StreamUtils { - /** Buffer size used across the benchmark package */ - public static final int BUFFER_SIZE = 1 << 16; // 64K - - /** File format type */ - public enum Type { - /** BZIP2 is automatically used for .bz2 and .bzip2 extensions. */ - BZIP2(CompressorStreamFactory.BZIP2), - /** GZIP is automatically used for .gz and .gzip extensions. */ - GZIP(CompressorStreamFactory.GZIP), - /** Plain text is used for anything which is not GZIP or BZIP. */ - PLAIN(null); - private final String csfType; - Type(String csfType) { - this.csfType = csfType; - } - private InputStream inputStream(InputStream in) throws IOException { - try { - return csfType==null ? in : new CompressorStreamFactory().createCompressorInputStream(csfType, in); - } catch (CompressorException e) { - IOException ioe = new IOException(e.getMessage()); - ioe.initCause(e); - throw ioe; } - } - private OutputStream outputStream(OutputStream os) throws IOException { - try { - return csfType==null ? 
+ os : new CompressorStreamFactory().createCompressorOutputStream(csfType, os); - } catch (CompressorException e) { - IOException ioe = new IOException(e.getMessage()); - ioe.initCause(e); - throw ioe; - } - } - } - + /** Buffer size used across the benchmark package */ + public static final int BUFFER_SIZE = 1 << 16; // 64K + + /** File format type */ + public enum Type { + /** BZIP2 is automatically used for .bz2 and .bzip2 extensions. */ + BZIP2(CompressorStreamFactory.BZIP2), + /** GZIP is automatically used for .gz and .gzip extensions. */ + GZIP(CompressorStreamFactory.GZIP), + /** Plain text is used for anything which is not GZIP or BZIP. */ + PLAIN(null); + private final String csfType; + Type(String csfType) { + this.csfType = csfType; + } + private InputStream inputStream(InputStream in) throws IOException { + try { + return csfType==null ? in : new CompressorStreamFactory().createCompressorInputStream(csfType, in); + } catch (CompressorException e) { + IOException ioe = new IOException(e.getMessage()); + ioe.initCause(e); + throw ioe; } + } + private OutputStream outputStream(OutputStream os) throws IOException { + try { + return csfType==null ? os : new CompressorStreamFactory().createCompressorOutputStream(csfType, os); + } catch (CompressorException e) { + IOException ioe = new IOException(e.getMessage()); + ioe.initCause(e); + throw ioe; + } + } + } + private static final Map extensionToType = new HashMap(); static { - // these in are lower case, we will lower case at the test as well + // these are in lower case; the extension is lower-cased before lookup as well extensionToType.put(".bz2", Type.BZIP2); extensionToType.put(".bzip", Type.BZIP2); extensionToType.put(".gz", Type.GZIP); @@ -95,14 +95,14 @@ /** Return the type of the file, or null if unknown */ private static Type fileType(File file) { - Type type = null; + Type type = null; String fileName = file.getName(); int idx = fileName.lastIndexOf('.'); if (idx != -1) { type = extensionToType.get(fileName.substring(idx).toLowerCase(Locale.ROOT)); } return type==null ? 
Type.PLAIN : type; - } + } /** * Returns an {@link OutputStream} over the requested file, identifying Index: lucene/suggest/src/java/org/apache/lucene/search/spell/DirectSpellChecker.java =================================================================== --- lucene/suggest/src/java/org/apache/lucene/search/spell/DirectSpellChecker.java (revision 1381159) +++ lucene/suggest/src/java/org/apache/lucene/search/spell/DirectSpellChecker.java (working copy) @@ -294,7 +294,7 @@ */ public SuggestWord[] suggestSimilar(Term term, int numSug, IndexReader ir, SuggestMode suggestMode) throws IOException { - return suggestSimilar(term, numSug, ir, suggestMode, this.accuracy); + return suggestSimilar(term, numSug, ir, suggestMode, this.accuracy); } /** Index: lucene/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java =================================================================== --- lucene/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java (revision 1381159) +++ lucene/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java (working copy) @@ -283,7 +283,7 @@ */ public String[] suggestSimilar(String word, int numSug, IndexReader ir, String field, SuggestMode suggestMode) throws IOException { - return suggestSimilar(word, numSug, ir, field, suggestMode, this.accuracy); + return suggestSimilar(word, numSug, ir, field, suggestMode, this.accuracy); } /** Index: lucene/suggest/src/java/org/apache/lucene/search/suggest/tst/TernaryTreeNode.java =================================================================== --- lucene/suggest/src/java/org/apache/lucene/search/suggest/tst/TernaryTreeNode.java (revision 1381159) +++ lucene/suggest/src/java/org/apache/lucene/search/suggest/tst/TernaryTreeNode.java (working copy) @@ -23,20 +23,20 @@ public class TernaryTreeNode { /** the character stored by a node. */ - char splitchar; - /** a reference object to the node containing character smaller than this node's character. */ - TernaryTreeNode loKid; - /** - * a reference object to the node containing character next to this node's character as - * occurring in the inserted token. - */ - TernaryTreeNode eqKid; - /** a reference object to the node containing character higher than this node's character. */ - TernaryTreeNode hiKid; - /** - * used by leaf nodes to store the complete tokens to be added to suggest list while - * auto-completing the prefix. - */ - String token; - Object val; + char splitchar; + /** a reference object to the node containing character smaller than this node's character. */ + TernaryTreeNode loKid; + /** + * a reference object to the node containing character next to this node's character as + * occurring in the inserted token. + */ + TernaryTreeNode eqKid; + /** a reference object to the node containing character higher than this node's character. */ + TernaryTreeNode hiKid; + /** + * used by leaf nodes to store the complete tokens to be added to suggest list while + * auto-completing the prefix. 
+ */ + String token; + Object val; } Index: lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/dict/UserDictionaryTest.java =================================================================== --- lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/dict/UserDictionaryTest.java (revision 1381159) +++ lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/dict/UserDictionaryTest.java (working copy) @@ -75,6 +75,6 @@ @Test public void testRead() throws IOException { UserDictionary dictionary = TestJapaneseTokenizer.readDict(); - assertNotNull(dictionary); + assertNotNull(dictionary); } } Index: lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseTokenizer.java =================================================================== --- lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseTokenizer.java (revision 1381159) +++ lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseTokenizer.java (working copy) @@ -326,12 +326,12 @@ public void testSegmentation() throws Exception { // Skip tests for Michelle Kwan -- UniDic segments Kwan as ク ワン - // String input = "ミシェル・クワンが優勝しました。スペースステーションに行きます。うたがわしい。"; - // String[] surfaceForms = { - // "ミシェル", "・", "クワン", "が", "優勝", "し", "まし", "た", "。", - // "スペース", "ステーション", "に", "行き", "ます", "。", - // "うたがわしい", "。" - // }; + // String input = "ミシェル・クワンが優勝しました。スペースステーションに行きます。うたがわしい。"; + // String[] surfaceForms = { + // "ミシェル", "・", "クワン", "が", "優勝", "し", "まし", "た", "。", + // "スペース", "ステーション", "に", "行き", "ます", "。", + // "うたがわしい", "。" + // }; String input = "スペースステーションに行きます。うたがわしい。"; String[] surfaceForms = { "スペース", "ステーション", "に", "行き", "ます", "。", Index: lucene/analysis/kuromoji/src/tools/java/org/apache/lucene/analysis/ja/util/TokenInfoDictionaryBuilder.java =================================================================== --- lucene/analysis/kuromoji/src/tools/java/org/apache/lucene/analysis/ja/util/TokenInfoDictionaryBuilder.java (revision 1381159) +++ lucene/analysis/kuromoji/src/tools/java/org/apache/lucene/analysis/ja/util/TokenInfoDictionaryBuilder.java (working copy) @@ -174,26 +174,26 @@ /* * IPADIC features * - * 0 - surface - * 1 - left cost - * 2 - right cost - * 3 - word cost - * 4-9 - pos - * 10 - base form - * 11 - reading - * 12 - pronounciation + * 0 - surface + * 1 - left cost + * 2 - right cost + * 3 - word cost + * 4-9 - pos + * 10 - base form + * 11 - reading + * 12 - pronunciation * * UniDic features * - * 0 - surface - * 1 - left cost - * 2 - right cost - * 3 - word cost - * 4-9 - pos - * 10 - base form reading - * 11 - base form - * 12 - surface form - * 13 - surface reading + * 0 - surface + * 1 - left cost + * 2 - right cost + * 3 - word cost + * 4-9 - pos + * 10 - base form reading + * 11 - base form + * 12 - surface form + * 13 - surface reading */ public String[] formatEntry(String[] features) { @@ -221,7 +221,7 @@ } else { features2[11] = features[13]; features2[12] = features[13]; - } + } return features2; } } Index: lucene/analysis/kuromoji/src/tools/java/org/apache/lucene/analysis/ja/util/UnknownDictionaryBuilder.java =================================================================== --- lucene/analysis/kuromoji/src/tools/java/org/apache/lucene/analysis/ja/util/UnknownDictionaryBuilder.java (revision 1381159) +++ lucene/analysis/kuromoji/src/tools/java/org/apache/lucene/analysis/ja/util/UnknownDictionaryBuilder.java (working copy) @@ -107,22 +107,22 @@ continue; } - if(line.startsWith("0x")) { // Category mapping - String[] 
values = line.split(" ", 2); // Split only first space + if(line.startsWith("0x")) { // Category mapping + String[] values = line.split(" ", 2); // Split only first space if(!values[0].contains("..")) { int cp = Integer.decode(values[0]).intValue(); - dictionary.putCharacterCategory(cp, values[1]); + dictionary.putCharacterCategory(cp, values[1]); } else { String[] codePoints = values[0].split("\\.\\."); int cpFrom = Integer.decode(codePoints[0]).intValue(); int cpTo = Integer.decode(codePoints[1]).intValue(); for(int i = cpFrom; i <= cpTo; i++){ - dictionary.putCharacterCategory(i, values[1]); + dictionary.putCharacterCategory(i, values[1]); } } - } else { // Invoke definition + } else { // Invoke definition String[] values = line.split(" "); // Consecutive space is merged above String characterClassName = values[0]; int invoke = Integer.parseInt(values[1]); Index: lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/UserDictionary.java =================================================================== --- lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/UserDictionary.java (revision 1381159) +++ lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/UserDictionary.java (working copy) @@ -246,7 +246,7 @@ return null; } - return allFeatures.split(INTERNAL_SEPARATOR); + return allFeatures.split(INTERNAL_SEPARATOR); } @@ -261,7 +261,7 @@ sb.append(CSVUtil.quoteEscape(feature)).append(","); } } else if (fields.length == 1) { // One feature doesn't need to escape value - sb.append(allFeatures[fields[0]]).append(","); + sb.append(allFeatures[fields[0]]).append(","); } else { for (int field : fields){ sb.append(CSVUtil.quoteEscape(allFeatures[field])).append(","); Index: lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/Dictionary.java =================================================================== --- lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/Dictionary.java (revision 1381159) +++ lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/Dictionary.java (working copy) @@ -28,21 +28,21 @@ /** * Get left id of specified word * @param wordId - * @return left id + * @return left id */ public int getLeftId(int wordId); /** * Get right id of specified word * @param wordId - * @return left id + * @return right id */ public int getRightId(int wordId); /** * Get word cost of specified word * @param wordId - * @return left id + * @return word cost */ public int getWordCost(int wordId); Index: lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/UnknownDictionary.java =================================================================== --- lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/UnknownDictionary.java (revision 1381159) +++ lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/UnknownDictionary.java (working copy) @@ -40,7 +40,7 @@ int length = 1; for (int i = 1; i < len; i++) { if (characterIdOfFirstCharacter == characterDefinition.getCharacterClass(text[offset+i])){ - length++; + length++; } else { break; } Index: lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/BinaryDictionary.java =================================================================== --- lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/BinaryDictionary.java (revision 1381159) +++ lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/BinaryDictionary.java (working copy) @@ -150,7 +150,7 @@ ref.length = 
targetMapOffsets[sourceId + 1] - ref.offset; } - @Override + @Override public int getLeftId(int wordId) { return buffer.getShort(wordId) >>> 3; } @@ -162,7 +162,7 @@ @Override public int getWordCost(int wordId) { - return buffer.getShort(wordId + 2); // Skip id + return buffer.getShort(wordId + 2); // Skip id } @Override Index: lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseTokenizer.java =================================================================== --- lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseTokenizer.java (revision 1381159) +++ lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseTokenizer.java (working copy) @@ -292,12 +292,12 @@ if (!characterDefinition.isKanji((char) buffer.get(pos2))) { allKanji = false; break; - } + } } - if (allKanji) { // Process only Kanji keywords + if (allKanji) { // Process only Kanji keywords return (length - SEARCH_MODE_KANJI_LENGTH) * SEARCH_MODE_KANJI_PENALTY; } else if (length > SEARCH_MODE_OTHER_LENGTH) { - return (length - SEARCH_MODE_OTHER_LENGTH) * SEARCH_MODE_OTHER_PENALTY; + return (length - SEARCH_MODE_OTHER_LENGTH) * SEARCH_MODE_OTHER_PENALTY; } } return 0; @@ -807,7 +807,7 @@ } if (characterId == characterDefinition.getCharacterClass((char) ch) && isPunctuation((char) ch) == isPunct) { - unknownWordLength++; + unknownWordLength++; } else { break; } Index: lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/util/CSVUtil.java =================================================================== --- lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/util/CSVUtil.java (revision 1381159) +++ lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/util/CSVUtil.java (working copy) @@ -42,7 +42,7 @@ */ public static String[] parse(String line) { boolean insideQuote = false; - ArrayList result = new ArrayList(); + ArrayList result = new ArrayList(); int quoteCount = 0; StringBuilder sb = new StringBuilder(); for(int i = 0; i < line.length(); i++) { Index: lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/hhmm/AbstractDictionary.java =================================================================== --- lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/hhmm/AbstractDictionary.java (revision 1381159) +++ lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/hhmm/AbstractDictionary.java (working copy) @@ -115,7 +115,7 @@ } int b0 = (buffer[0] & 0x0FF) - 161; // Code starts from A1, therefore subtract 0xA1=161 int b1 = (buffer[1] & 0x0FF) - 161; // There is no Chinese char for the first and last symbol. - // Therefore, each code page only has 16*6-2=94 characters. + // Therefore, each code page only has 16*6-2=94 characters. 
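// A hedged worked example of the id computed below, assuming GB2312 input where
// each of the two bytes ranges over 0xA1..0xFE (94 usable values per byte):
//   pair (0xB0, 0xA1): b0 = 0xB0 - 0xA1 = 15, b1 = 0xA1 - 0xA1 = 0
//   id = b0 * 94 + b1 = 15 * 94 + 0 = 1410
// so each 94-wide code-page row maps to a contiguous block of ids and no two
// byte pairs collide.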
return (short) (b0 * 94 + b1); } catch (UnsupportedEncodingException e) { throw new RuntimeException(e); Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/core/WordBreakTestUnicode_6_1_0.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/core/WordBreakTestUnicode_6_1_0.java (revision 1381159) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/core/WordBreakTestUnicode_6_1_0.java (working copy) @@ -44,3915 +44,3915 @@ public class WordBreakTestUnicode_6_1_0 extends BaseTokenStreamTestCase { public void test(Analyzer analyzer) throws Exception { - // ÷ 0001 ÷ 0001 ÷ # ÷ [0.2] (Other) ÷ [999.0] (Other) ÷ [0.3] + // ÷ 0001 ÷ 0001 ÷ # ÷ [0.2] (Other) ÷ [999.0] (Other) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u0001", new String[] { }); - // ÷ 0001 × 0308 ÷ 0001 ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] (Other) ÷ [0.3] + // ÷ 0001 × 0308 ÷ 0001 ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] (Other) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u0308\u0001", new String[] { }); - // ÷ 0001 ÷ 000D ÷ # ÷ [0.2] (Other) ÷ [3.2] (CR) ÷ [0.3] + // ÷ 0001 ÷ 000D ÷ # ÷ [0.2] (Other) ÷ [3.2] (CR) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\r", new String[] { }); - // ÷ 0001 × 0308 ÷ 000D ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (CR) ÷ [0.3] + // ÷ 0001 × 0308 ÷ 000D ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (CR) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u0308\r", new String[] { }); - // ÷ 0001 ÷ 000A ÷ # ÷ [0.2] (Other) ÷ [3.2] (LF) ÷ [0.3] + // ÷ 0001 ÷ 000A ÷ # ÷ [0.2] (Other) ÷ [3.2] (LF) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\n", new String[] { }); - // ÷ 0001 × 0308 ÷ 000A ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (LF) ÷ [0.3] + // ÷ 0001 × 0308 ÷ 000A ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (LF) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u0308\n", new String[] { }); - // ÷ 0001 ÷ 000B ÷ # ÷ [0.2] (Other) ÷ [3.2] (Newline) ÷ [0.3] + // ÷ 0001 ÷ 000B ÷ # ÷ [0.2] (Other) ÷ [3.2] (Newline) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u000B", new String[] { }); - // ÷ 0001 × 0308 ÷ 000B ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (Newline) ÷ [0.3] + // ÷ 0001 × 0308 ÷ 000B ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (Newline) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u0308\u000B", new String[] { }); - // ÷ 0001 ÷ 3031 ÷ # ÷ [0.2] (Other) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3] + // ÷ 0001 ÷ 3031 ÷ # ÷ [0.2] (Other) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u3031", new String[] { "\u3031" }); - // ÷ 0001 × 0308 ÷ 3031 ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3] + // ÷ 0001 × 0308 ÷ 3031 ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u0308\u3031", new String[] { "\u3031" }); - // ÷ 0001 ÷ 0041 ÷ # ÷ [0.2] (Other) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3] + // ÷ 0001 ÷ 0041 ÷ # ÷ [0.2] (Other) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u0041", new String[] { "\u0041" }); - // ÷ 0001 × 0308 ÷ 0041 ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3] + // 
÷ 0001 × 0308 ÷ 0041 ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u0308\u0041", new String[] { "\u0041" }); - // ÷ 0001 ÷ 003A ÷ # ÷ [0.2] (Other) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0001 ÷ 003A ÷ # ÷ [0.2] (Other) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u003A", new String[] { }); - // ÷ 0001 × 0308 ÷ 003A ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0001 × 0308 ÷ 003A ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u0308\u003A", new String[] { }); - // ÷ 0001 ÷ 002C ÷ # ÷ [0.2] (Other) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0001 ÷ 002C ÷ # ÷ [0.2] (Other) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u002C", new String[] { }); - // ÷ 0001 × 0308 ÷ 002C ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0001 × 0308 ÷ 002C ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u0308\u002C", new String[] { }); - // ÷ 0001 ÷ 0027 ÷ # ÷ [0.2] (Other) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0001 ÷ 0027 ÷ # ÷ [0.2] (Other) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u0027", new String[] { }); - // ÷ 0001 × 0308 ÷ 0027 ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0001 × 0308 ÷ 0027 ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u0308\u0027", new String[] { }); - // ÷ 0001 ÷ 0030 ÷ # ÷ [0.2] (Other) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3] + // ÷ 0001 ÷ 0030 ÷ # ÷ [0.2] (Other) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u0030", new String[] { "\u0030" }); - // ÷ 0001 × 0308 ÷ 0030 ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3] + // ÷ 0001 × 0308 ÷ 0030 ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u0308\u0030", new String[] { "\u0030" }); - // ÷ 0001 ÷ 005F ÷ # ÷ [0.2] (Other) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3] + // ÷ 0001 ÷ 005F ÷ # ÷ [0.2] (Other) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u005F", new String[] { }); - // ÷ 0001 × 0308 ÷ 005F ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3] + // ÷ 0001 × 0308 ÷ 005F ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u0308\u005F", new String[] { }); - // ÷ 0001 × 00AD ÷ # ÷ [0.2] (Other) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3] + // ÷ 0001 × 00AD ÷ # ÷ [0.2] (Other) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u00AD", new String[] { }); - // ÷ 0001 × 0308 × 00AD ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3] + // ÷ 0001 × 0308 × 00AD ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u0308\u00AD", new String[] { }); - // ÷ 0001 × 0300 ÷ # ÷ [0.2] (Other) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] + // ÷ 0001 × 0300 ÷ # ÷ [0.2] 
(Other) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u0300", new String[] { }); - // ÷ 0001 × 0308 × 0300 ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] + // ÷ 0001 × 0308 × 0300 ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u0308\u0300", new String[] { }); - // ÷ 0001 ÷ 0061 × 2060 ÷ # ÷ [0.2] (Other) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0001 ÷ 0061 × 2060 ÷ # ÷ [0.2] (Other) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u0061\u2060", new String[] { "\u0061\u2060" }); - // ÷ 0001 × 0308 ÷ 0061 × 2060 ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0001 × 0308 ÷ 0061 × 2060 ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u0308\u0061\u2060", new String[] { "\u0061\u2060" }); - // ÷ 0001 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] (Other) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0001 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] (Other) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u0061\u003A", new String[] { "\u0061" }); - // ÷ 0001 × 0308 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0001 × 0308 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u0308\u0061\u003A", new String[] { "\u0061" }); - // ÷ 0001 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] (Other) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0001 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] (Other) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u0061\u0027", new String[] { "\u0061" }); - // ÷ 0001 × 0308 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0001 × 0308 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u0308\u0061\u0027", new String[] { "\u0061" }); - // ÷ 0001 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] (Other) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0001 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] (Other) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u0061\u0027\u2060", new String[] { "\u0061" }); - // ÷ 0001 × 0308 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0001 × 0308 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) 
× [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u0308\u0061\u0027\u2060", new String[] { "\u0061" }); - // ÷ 0001 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] (Other) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0001 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] (Other) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u0061\u002C", new String[] { "\u0061" }); - // ÷ 0001 × 0308 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0001 × 0308 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u0308\u0061\u002C", new String[] { "\u0061" }); - // ÷ 0001 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] (Other) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0001 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] (Other) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u0031\u003A", new String[] { "\u0031" }); - // ÷ 0001 × 0308 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0001 × 0308 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u0308\u0031\u003A", new String[] { "\u0031" }); - // ÷ 0001 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] (Other) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0001 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] (Other) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u0031\u0027", new String[] { "\u0031" }); - // ÷ 0001 × 0308 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0001 × 0308 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u0308\u0031\u0027", new String[] { "\u0031" }); - // ÷ 0001 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] (Other) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0001 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] (Other) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u0031\u002C", new String[] { "\u0031" }); - // ÷ 0001 × 0308 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0001 × 0308 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u0308\u0031\u002C", new String[] { "\u0031" }); - // ÷ 0001 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] (Other) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0001 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] (Other) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u0031\u002E\u2060", new String[] { "\u0031" }); - // ÷ 0001 × 0308 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER 
(Format_FE) ÷ [0.3] + // ÷ 0001 × 0308 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] (Other) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0001\u0308\u0031\u002E\u2060", new String[] { "\u0031" }); - // ÷ 000D ÷ 0001 ÷ # ÷ [0.2] (CR) ÷ [3.1] (Other) ÷ [0.3] + // ÷ 000D ÷ 0001 ÷ # ÷ [0.2] (CR) ÷ [3.1] (Other) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u0001", new String[] { }); - // ÷ 000D ÷ 0308 ÷ 0001 ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] (Other) ÷ [0.3] + // ÷ 000D ÷ 0308 ÷ 0001 ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] (Other) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u0308\u0001", new String[] { }); - // ÷ 000D ÷ 000D ÷ # ÷ [0.2] (CR) ÷ [3.1] (CR) ÷ [0.3] + // ÷ 000D ÷ 000D ÷ # ÷ [0.2] (CR) ÷ [3.1] (CR) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\r", new String[] { }); - // ÷ 000D ÷ 0308 ÷ 000D ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (CR) ÷ [0.3] + // ÷ 000D ÷ 0308 ÷ 000D ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (CR) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u0308\r", new String[] { }); - // ÷ 000D × 000A ÷ # ÷ [0.2] (CR) × [3.0] (LF) ÷ [0.3] + // ÷ 000D × 000A ÷ # ÷ [0.2] (CR) × [3.0] (LF) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\n", new String[] { }); - // ÷ 000D ÷ 0308 ÷ 000A ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (LF) ÷ [0.3] + // ÷ 000D ÷ 0308 ÷ 000A ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (LF) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u0308\n", new String[] { }); - // ÷ 000D ÷ 000B ÷ # ÷ [0.2] (CR) ÷ [3.1] (Newline) ÷ [0.3] + // ÷ 000D ÷ 000B ÷ # ÷ [0.2] (CR) ÷ [3.1] (Newline) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u000B", new String[] { }); - // ÷ 000D ÷ 0308 ÷ 000B ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (Newline) ÷ [0.3] + // ÷ 000D ÷ 0308 ÷ 000B ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (Newline) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u0308\u000B", new String[] { }); - // ÷ 000D ÷ 3031 ÷ # ÷ [0.2] (CR) ÷ [3.1] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3] + // ÷ 000D ÷ 3031 ÷ # ÷ [0.2] (CR) ÷ [3.1] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u3031", new String[] { "\u3031" }); - // ÷ 000D ÷ 0308 ÷ 3031 ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3] + // ÷ 000D ÷ 0308 ÷ 3031 ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u0308\u3031", new String[] { "\u3031" }); - // ÷ 000D ÷ 0041 ÷ # ÷ [0.2] (CR) ÷ [3.1] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3] + // ÷ 000D ÷ 0041 ÷ # ÷ [0.2] (CR) ÷ [3.1] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u0041", new String[] { "\u0041" }); - // ÷ 000D ÷ 0308 ÷ 0041 ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3] + // ÷ 000D ÷ 0308 ÷ 0041 ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u0308\u0041", new String[] { "\u0041" }); - // ÷ 000D ÷ 003A ÷ # ÷ [0.2] (CR) ÷ [3.1] COLON (MidLetter) ÷ [0.3] + // ÷ 000D ÷ 003A ÷ # ÷ [0.2] (CR) ÷ [3.1] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u003A", new String[] { }); - // ÷ 000D ÷ 0308 ÷ 003A ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS 
(Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 000D ÷ 0308 ÷ 003A ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u0308\u003A", new String[] { }); - // ÷ 000D ÷ 002C ÷ # ÷ [0.2] (CR) ÷ [3.1] COMMA (MidNum) ÷ [0.3] + // ÷ 000D ÷ 002C ÷ # ÷ [0.2] (CR) ÷ [3.1] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u002C", new String[] { }); - // ÷ 000D ÷ 0308 ÷ 002C ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 000D ÷ 0308 ÷ 002C ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u0308\u002C", new String[] { }); - // ÷ 000D ÷ 0027 ÷ # ÷ [0.2] (CR) ÷ [3.1] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 000D ÷ 0027 ÷ # ÷ [0.2] (CR) ÷ [3.1] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u0027", new String[] { }); - // ÷ 000D ÷ 0308 ÷ 0027 ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 000D ÷ 0308 ÷ 0027 ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u0308\u0027", new String[] { }); - // ÷ 000D ÷ 0030 ÷ # ÷ [0.2] (CR) ÷ [3.1] DIGIT ZERO (Numeric) ÷ [0.3] + // ÷ 000D ÷ 0030 ÷ # ÷ [0.2] (CR) ÷ [3.1] DIGIT ZERO (Numeric) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u0030", new String[] { "\u0030" }); - // ÷ 000D ÷ 0308 ÷ 0030 ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3] + // ÷ 000D ÷ 0308 ÷ 0030 ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u0308\u0030", new String[] { "\u0030" }); - // ÷ 000D ÷ 005F ÷ # ÷ [0.2] (CR) ÷ [3.1] LOW LINE (ExtendNumLet) ÷ [0.3] + // ÷ 000D ÷ 005F ÷ # ÷ [0.2] (CR) ÷ [3.1] LOW LINE (ExtendNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u005F", new String[] { }); - // ÷ 000D ÷ 0308 ÷ 005F ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3] + // ÷ 000D ÷ 0308 ÷ 005F ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u0308\u005F", new String[] { }); - // ÷ 000D ÷ 00AD ÷ # ÷ [0.2] (CR) ÷ [3.1] SOFT HYPHEN (Format_FE) ÷ [0.3] + // ÷ 000D ÷ 00AD ÷ # ÷ [0.2] (CR) ÷ [3.1] SOFT HYPHEN (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u00AD", new String[] { }); - // ÷ 000D ÷ 0308 × 00AD ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3] + // ÷ 000D ÷ 0308 × 00AD ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u0308\u00AD", new String[] { }); - // ÷ 000D ÷ 0300 ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] + // ÷ 000D ÷ 0300 ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u0300", new String[] { }); - // ÷ 000D ÷ 0308 × 0300 ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] + // ÷ 000D ÷ 0308 × 0300 ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u0308\u0300", new String[] { }); - // ÷ 000D ÷ 0061 × 2060 ÷ # ÷ [0.2] (CR) ÷ [3.1] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 000D ÷ 0061 × 2060 ÷ # ÷ [0.2] (CR) ÷ [3.1] 
LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u0061\u2060", new String[] { "\u0061\u2060" }); - // ÷ 000D ÷ 0308 ÷ 0061 × 2060 ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 000D ÷ 0308 ÷ 0061 × 2060 ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u0308\u0061\u2060", new String[] { "\u0061\u2060" }); - // ÷ 000D ÷ 0061 ÷ 003A ÷ # ÷ [0.2] (CR) ÷ [3.1] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 000D ÷ 0061 ÷ 003A ÷ # ÷ [0.2] (CR) ÷ [3.1] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u0061\u003A", new String[] { "\u0061" }); - // ÷ 000D ÷ 0308 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 000D ÷ 0308 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u0308\u0061\u003A", new String[] { "\u0061" }); - // ÷ 000D ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] (CR) ÷ [3.1] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 000D ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] (CR) ÷ [3.1] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u0061\u0027", new String[] { "\u0061" }); - // ÷ 000D ÷ 0308 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 000D ÷ 0308 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u0308\u0061\u0027", new String[] { "\u0061" }); - // ÷ 000D ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] (CR) ÷ [3.1] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 000D ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] (CR) ÷ [3.1] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u0061\u0027\u2060", new String[] { "\u0061" }); - // ÷ 000D ÷ 0308 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 000D ÷ 0308 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u0308\u0061\u0027\u2060", new String[] { "\u0061" }); - // ÷ 000D ÷ 0061 ÷ 002C ÷ # ÷ [0.2] (CR) ÷ [3.1] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 000D ÷ 0061 ÷ 002C ÷ # ÷ [0.2] (CR) ÷ [3.1] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u0061\u002C", new String[] { "\u0061" }); - // ÷ 000D ÷ 0308 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 000D ÷ 0308 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ 
[999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u0308\u0061\u002C", new String[] { "\u0061" }); - // ÷ 000D ÷ 0031 ÷ 003A ÷ # ÷ [0.2] (CR) ÷ [3.1] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 000D ÷ 0031 ÷ 003A ÷ # ÷ [0.2] (CR) ÷ [3.1] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u0031\u003A", new String[] { "\u0031" }); - // ÷ 000D ÷ 0308 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 000D ÷ 0308 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u0308\u0031\u003A", new String[] { "\u0031" }); - // ÷ 000D ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] (CR) ÷ [3.1] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 000D ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] (CR) ÷ [3.1] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u0031\u0027", new String[] { "\u0031" }); - // ÷ 000D ÷ 0308 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 000D ÷ 0308 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u0308\u0031\u0027", new String[] { "\u0031" }); - // ÷ 000D ÷ 0031 ÷ 002C ÷ # ÷ [0.2] (CR) ÷ [3.1] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 000D ÷ 0031 ÷ 002C ÷ # ÷ [0.2] (CR) ÷ [3.1] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u0031\u002C", new String[] { "\u0031" }); - // ÷ 000D ÷ 0308 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 000D ÷ 0308 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u0308\u0031\u002C", new String[] { "\u0031" }); - // ÷ 000D ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] (CR) ÷ [3.1] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 000D ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] (CR) ÷ [3.1] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u0031\u002E\u2060", new String[] { "\u0031" }); - // ÷ 000D ÷ 0308 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 000D ÷ 0308 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] (CR) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\r\u0308\u0031\u002E\u2060", new String[] { "\u0031" }); - // ÷ 000A ÷ 0001 ÷ # ÷ [0.2] (LF) ÷ [3.1] (Other) ÷ [0.3] + // ÷ 000A ÷ 0001 ÷ # ÷ [0.2] (LF) ÷ [3.1] (Other) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u0001", new String[] { }); - // ÷ 000A ÷ 0308 ÷ 0001 ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] (Other) ÷ [0.3] + // ÷ 000A ÷ 0308 ÷ 0001 ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] (Other) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u0308\u0001", new String[] { }); - // ÷ 000A ÷ 000D ÷ # ÷ [0.2] (LF) ÷ [3.1] (CR) ÷ [0.3] + // ÷ 000A ÷ 000D ÷ # 
÷ [0.2] (LF) ÷ [3.1] (CR) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\r", new String[] { }); - // ÷ 000A ÷ 0308 ÷ 000D ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (CR) ÷ [0.3] + // ÷ 000A ÷ 0308 ÷ 000D ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (CR) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u0308\r", new String[] { }); - // ÷ 000A ÷ 000A ÷ # ÷ [0.2] (LF) ÷ [3.1] (LF) ÷ [0.3] + // ÷ 000A ÷ 000A ÷ # ÷ [0.2] (LF) ÷ [3.1] (LF) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\n", new String[] { }); - // ÷ 000A ÷ 0308 ÷ 000A ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (LF) ÷ [0.3] + // ÷ 000A ÷ 0308 ÷ 000A ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (LF) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u0308\n", new String[] { }); - // ÷ 000A ÷ 000B ÷ # ÷ [0.2] (LF) ÷ [3.1] (Newline) ÷ [0.3] + // ÷ 000A ÷ 000B ÷ # ÷ [0.2] (LF) ÷ [3.1] (Newline) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u000B", new String[] { }); - // ÷ 000A ÷ 0308 ÷ 000B ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (Newline) ÷ [0.3] + // ÷ 000A ÷ 0308 ÷ 000B ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (Newline) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u0308\u000B", new String[] { }); - // ÷ 000A ÷ 3031 ÷ # ÷ [0.2] (LF) ÷ [3.1] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3] + // ÷ 000A ÷ 3031 ÷ # ÷ [0.2] (LF) ÷ [3.1] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u3031", new String[] { "\u3031" }); - // ÷ 000A ÷ 0308 ÷ 3031 ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3] + // ÷ 000A ÷ 0308 ÷ 3031 ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u0308\u3031", new String[] { "\u3031" }); - // ÷ 000A ÷ 0041 ÷ # ÷ [0.2] (LF) ÷ [3.1] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3] + // ÷ 000A ÷ 0041 ÷ # ÷ [0.2] (LF) ÷ [3.1] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u0041", new String[] { "\u0041" }); - // ÷ 000A ÷ 0308 ÷ 0041 ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3] + // ÷ 000A ÷ 0308 ÷ 0041 ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u0308\u0041", new String[] { "\u0041" }); - // ÷ 000A ÷ 003A ÷ # ÷ [0.2] (LF) ÷ [3.1] COLON (MidLetter) ÷ [0.3] + // ÷ 000A ÷ 003A ÷ # ÷ [0.2] (LF) ÷ [3.1] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u003A", new String[] { }); - // ÷ 000A ÷ 0308 ÷ 003A ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 000A ÷ 0308 ÷ 003A ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u0308\u003A", new String[] { }); - // ÷ 000A ÷ 002C ÷ # ÷ [0.2] (LF) ÷ [3.1] COMMA (MidNum) ÷ [0.3] + // ÷ 000A ÷ 002C ÷ # ÷ [0.2] (LF) ÷ [3.1] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u002C", new String[] { }); - // ÷ 000A ÷ 0308 ÷ 002C ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 000A ÷ 0308 ÷ 002C ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u0308\u002C", new String[] { }); - // ÷ 000A ÷ 0027 ÷ # ÷ [0.2] (LF) ÷ [3.1] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 000A ÷ 0027 ÷ # ÷ [0.2] (LF) ÷ [3.1] APOSTROPHE 
(MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u0027", new String[] { }); - // ÷ 000A ÷ 0308 ÷ 0027 ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 000A ÷ 0308 ÷ 0027 ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u0308\u0027", new String[] { }); - // ÷ 000A ÷ 0030 ÷ # ÷ [0.2] (LF) ÷ [3.1] DIGIT ZERO (Numeric) ÷ [0.3] + // ÷ 000A ÷ 0030 ÷ # ÷ [0.2] (LF) ÷ [3.1] DIGIT ZERO (Numeric) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u0030", new String[] { "\u0030" }); - // ÷ 000A ÷ 0308 ÷ 0030 ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3] + // ÷ 000A ÷ 0308 ÷ 0030 ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u0308\u0030", new String[] { "\u0030" }); - // ÷ 000A ÷ 005F ÷ # ÷ [0.2] (LF) ÷ [3.1] LOW LINE (ExtendNumLet) ÷ [0.3] + // ÷ 000A ÷ 005F ÷ # ÷ [0.2] (LF) ÷ [3.1] LOW LINE (ExtendNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u005F", new String[] { }); - // ÷ 000A ÷ 0308 ÷ 005F ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3] + // ÷ 000A ÷ 0308 ÷ 005F ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u0308\u005F", new String[] { }); - // ÷ 000A ÷ 00AD ÷ # ÷ [0.2] (LF) ÷ [3.1] SOFT HYPHEN (Format_FE) ÷ [0.3] + // ÷ 000A ÷ 00AD ÷ # ÷ [0.2] (LF) ÷ [3.1] SOFT HYPHEN (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u00AD", new String[] { }); - // ÷ 000A ÷ 0308 × 00AD ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3] + // ÷ 000A ÷ 0308 × 00AD ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u0308\u00AD", new String[] { }); - // ÷ 000A ÷ 0300 ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] + // ÷ 000A ÷ 0300 ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u0300", new String[] { }); - // ÷ 000A ÷ 0308 × 0300 ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] + // ÷ 000A ÷ 0308 × 0300 ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u0308\u0300", new String[] { }); - // ÷ 000A ÷ 0061 × 2060 ÷ # ÷ [0.2] (LF) ÷ [3.1] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 000A ÷ 0061 × 2060 ÷ # ÷ [0.2] (LF) ÷ [3.1] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u0061\u2060", new String[] { "\u0061\u2060" }); - // ÷ 000A ÷ 0308 ÷ 0061 × 2060 ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 000A ÷ 0308 ÷ 0061 × 2060 ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u0308\u0061\u2060", new String[] { "\u0061\u2060" }); - // ÷ 000A ÷ 0061 ÷ 003A ÷ # ÷ [0.2] (LF) ÷ [3.1] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 000A ÷ 0061 ÷ 003A ÷ # ÷ [0.2] (LF) ÷ [3.1] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, 
"\n\u0061\u003A", new String[] { "\u0061" }); - // ÷ 000A ÷ 0308 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 000A ÷ 0308 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u0308\u0061\u003A", new String[] { "\u0061" }); - // ÷ 000A ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] (LF) ÷ [3.1] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 000A ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] (LF) ÷ [3.1] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u0061\u0027", new String[] { "\u0061" }); - // ÷ 000A ÷ 0308 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 000A ÷ 0308 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u0308\u0061\u0027", new String[] { "\u0061" }); - // ÷ 000A ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] (LF) ÷ [3.1] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 000A ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] (LF) ÷ [3.1] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u0061\u0027\u2060", new String[] { "\u0061" }); - // ÷ 000A ÷ 0308 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 000A ÷ 0308 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u0308\u0061\u0027\u2060", new String[] { "\u0061" }); - // ÷ 000A ÷ 0061 ÷ 002C ÷ # ÷ [0.2] (LF) ÷ [3.1] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 000A ÷ 0061 ÷ 002C ÷ # ÷ [0.2] (LF) ÷ [3.1] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u0061\u002C", new String[] { "\u0061" }); - // ÷ 000A ÷ 0308 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 000A ÷ 0308 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u0308\u0061\u002C", new String[] { "\u0061" }); - // ÷ 000A ÷ 0031 ÷ 003A ÷ # ÷ [0.2] (LF) ÷ [3.1] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 000A ÷ 0031 ÷ 003A ÷ # ÷ [0.2] (LF) ÷ [3.1] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u0031\u003A", new String[] { "\u0031" }); - // ÷ 000A ÷ 0308 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 000A ÷ 0308 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u0308\u0031\u003A", new String[] { "\u0031" }); - // ÷ 000A ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] (LF) ÷ [3.1] 
DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 000A ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] (LF) ÷ [3.1] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u0031\u0027", new String[] { "\u0031" }); - // ÷ 000A ÷ 0308 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 000A ÷ 0308 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u0308\u0031\u0027", new String[] { "\u0031" }); - // ÷ 000A ÷ 0031 ÷ 002C ÷ # ÷ [0.2] (LF) ÷ [3.1] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 000A ÷ 0031 ÷ 002C ÷ # ÷ [0.2] (LF) ÷ [3.1] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u0031\u002C", new String[] { "\u0031" }); - // ÷ 000A ÷ 0308 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 000A ÷ 0308 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u0308\u0031\u002C", new String[] { "\u0031" }); - // ÷ 000A ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] (LF) ÷ [3.1] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 000A ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] (LF) ÷ [3.1] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u0031\u002E\u2060", new String[] { "\u0031" }); - // ÷ 000A ÷ 0308 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 000A ÷ 0308 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] (LF) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\n\u0308\u0031\u002E\u2060", new String[] { "\u0031" }); - // ÷ 000B ÷ 0001 ÷ # ÷ [0.2] (Newline) ÷ [3.1] (Other) ÷ [0.3] + // ÷ 000B ÷ 0001 ÷ # ÷ [0.2] (Newline) ÷ [3.1] (Other) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\u0001", new String[] { }); - // ÷ 000B ÷ 0308 ÷ 0001 ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] (Other) ÷ [0.3] + // ÷ 000B ÷ 0308 ÷ 0001 ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] (Other) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\u0308\u0001", new String[] { }); - // ÷ 000B ÷ 000D ÷ # ÷ [0.2] (Newline) ÷ [3.1] (CR) ÷ [0.3] + // ÷ 000B ÷ 000D ÷ # ÷ [0.2] (Newline) ÷ [3.1] (CR) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\r", new String[] { }); - // ÷ 000B ÷ 0308 ÷ 000D ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (CR) ÷ [0.3] + // ÷ 000B ÷ 0308 ÷ 000D ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (CR) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\u0308\r", new String[] { }); - // ÷ 000B ÷ 000A ÷ # ÷ [0.2] (Newline) ÷ [3.1] (LF) ÷ [0.3] + // ÷ 000B ÷ 000A ÷ # ÷ [0.2] (Newline) ÷ [3.1] (LF) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\n", new String[] { }); - // ÷ 000B ÷ 0308 ÷ 000A ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (LF) ÷ [0.3] + // ÷ 000B ÷ 0308 ÷ 000A ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (LF) ÷ [0.3] assertAnalyzesTo(analyzer, 
"\u000B\u0308\n", new String[] { }); - // ÷ 000B ÷ 000B ÷ # ÷ [0.2] (Newline) ÷ [3.1] (Newline) ÷ [0.3] + // ÷ 000B ÷ 000B ÷ # ÷ [0.2] (Newline) ÷ [3.1] (Newline) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\u000B", new String[] { }); - // ÷ 000B ÷ 0308 ÷ 000B ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (Newline) ÷ [0.3] + // ÷ 000B ÷ 0308 ÷ 000B ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (Newline) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\u0308\u000B", new String[] { }); - // ÷ 000B ÷ 3031 ÷ # ÷ [0.2] (Newline) ÷ [3.1] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3] + // ÷ 000B ÷ 3031 ÷ # ÷ [0.2] (Newline) ÷ [3.1] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\u3031", new String[] { "\u3031" }); - // ÷ 000B ÷ 0308 ÷ 3031 ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3] + // ÷ 000B ÷ 0308 ÷ 3031 ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\u0308\u3031", new String[] { "\u3031" }); - // ÷ 000B ÷ 0041 ÷ # ÷ [0.2] (Newline) ÷ [3.1] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3] + // ÷ 000B ÷ 0041 ÷ # ÷ [0.2] (Newline) ÷ [3.1] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\u0041", new String[] { "\u0041" }); - // ÷ 000B ÷ 0308 ÷ 0041 ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3] + // ÷ 000B ÷ 0308 ÷ 0041 ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\u0308\u0041", new String[] { "\u0041" }); - // ÷ 000B ÷ 003A ÷ # ÷ [0.2] (Newline) ÷ [3.1] COLON (MidLetter) ÷ [0.3] + // ÷ 000B ÷ 003A ÷ # ÷ [0.2] (Newline) ÷ [3.1] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\u003A", new String[] { }); - // ÷ 000B ÷ 0308 ÷ 003A ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 000B ÷ 0308 ÷ 003A ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\u0308\u003A", new String[] { }); - // ÷ 000B ÷ 002C ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMMA (MidNum) ÷ [0.3] + // ÷ 000B ÷ 002C ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\u002C", new String[] { }); - // ÷ 000B ÷ 0308 ÷ 002C ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 000B ÷ 0308 ÷ 002C ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\u0308\u002C", new String[] { }); - // ÷ 000B ÷ 0027 ÷ # ÷ [0.2] (Newline) ÷ [3.1] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 000B ÷ 0027 ÷ # ÷ [0.2] (Newline) ÷ [3.1] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\u0027", new String[] { }); - // ÷ 000B ÷ 0308 ÷ 0027 ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 000B ÷ 0308 ÷ 0027 ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\u0308\u0027", new String[] { }); - // ÷ 000B ÷ 0030 ÷ # ÷ [0.2] (Newline) ÷ [3.1] DIGIT ZERO (Numeric) ÷ [0.3] + // ÷ 000B ÷ 0030 ÷ # ÷ [0.2] (Newline) ÷ [3.1] DIGIT ZERO (Numeric) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\u0030", new 
String[] { "\u0030" }); - // ÷ 000B ÷ 0308 ÷ 0030 ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3] + // ÷ 000B ÷ 0308 ÷ 0030 ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\u0308\u0030", new String[] { "\u0030" }); - // ÷ 000B ÷ 005F ÷ # ÷ [0.2] (Newline) ÷ [3.1] LOW LINE (ExtendNumLet) ÷ [0.3] + // ÷ 000B ÷ 005F ÷ # ÷ [0.2] (Newline) ÷ [3.1] LOW LINE (ExtendNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\u005F", new String[] { }); - // ÷ 000B ÷ 0308 ÷ 005F ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3] + // ÷ 000B ÷ 0308 ÷ 005F ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\u0308\u005F", new String[] { }); - // ÷ 000B ÷ 00AD ÷ # ÷ [0.2] (Newline) ÷ [3.1] SOFT HYPHEN (Format_FE) ÷ [0.3] + // ÷ 000B ÷ 00AD ÷ # ÷ [0.2] (Newline) ÷ [3.1] SOFT HYPHEN (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\u00AD", new String[] { }); - // ÷ 000B ÷ 0308 × 00AD ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3] + // ÷ 000B ÷ 0308 × 00AD ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\u0308\u00AD", new String[] { }); - // ÷ 000B ÷ 0300 ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] + // ÷ 000B ÷ 0300 ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\u0300", new String[] { }); - // ÷ 000B ÷ 0308 × 0300 ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] + // ÷ 000B ÷ 0308 × 0300 ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\u0308\u0300", new String[] { }); - // ÷ 000B ÷ 0061 × 2060 ÷ # ÷ [0.2] (Newline) ÷ [3.1] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 000B ÷ 0061 × 2060 ÷ # ÷ [0.2] (Newline) ÷ [3.1] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\u0061\u2060", new String[] { "\u0061\u2060" }); - // ÷ 000B ÷ 0308 ÷ 0061 × 2060 ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 000B ÷ 0308 ÷ 0061 × 2060 ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\u0308\u0061\u2060", new String[] { "\u0061\u2060" }); - // ÷ 000B ÷ 0061 ÷ 003A ÷ # ÷ [0.2] (Newline) ÷ [3.1] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 000B ÷ 0061 ÷ 003A ÷ # ÷ [0.2] (Newline) ÷ [3.1] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\u0061\u003A", new String[] { "\u0061" }); - // ÷ 000B ÷ 0308 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 000B ÷ 0308 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\u0308\u0061\u003A", new 
String[] { "\u0061" }); - // ÷ 000B ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] (Newline) ÷ [3.1] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 000B ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] (Newline) ÷ [3.1] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\u0061\u0027", new String[] { "\u0061" }); - // ÷ 000B ÷ 0308 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 000B ÷ 0308 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\u0308\u0061\u0027", new String[] { "\u0061" }); - // ÷ 000B ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] (Newline) ÷ [3.1] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 000B ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] (Newline) ÷ [3.1] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\u0061\u0027\u2060", new String[] { "\u0061" }); - // ÷ 000B ÷ 0308 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 000B ÷ 0308 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\u0308\u0061\u0027\u2060", new String[] { "\u0061" }); - // ÷ 000B ÷ 0061 ÷ 002C ÷ # ÷ [0.2] (Newline) ÷ [3.1] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 000B ÷ 0061 ÷ 002C ÷ # ÷ [0.2] (Newline) ÷ [3.1] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\u0061\u002C", new String[] { "\u0061" }); - // ÷ 000B ÷ 0308 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 000B ÷ 0308 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\u0308\u0061\u002C", new String[] { "\u0061" }); - // ÷ 000B ÷ 0031 ÷ 003A ÷ # ÷ [0.2] (Newline) ÷ [3.1] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 000B ÷ 0031 ÷ 003A ÷ # ÷ [0.2] (Newline) ÷ [3.1] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\u0031\u003A", new String[] { "\u0031" }); - // ÷ 000B ÷ 0308 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 000B ÷ 0308 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\u0308\u0031\u003A", new String[] { "\u0031" }); - // ÷ 000B ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] (Newline) ÷ [3.1] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 000B ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] (Newline) ÷ [3.1] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u000B\u0031\u0027", new String[] { "\u0031" }); - // ÷ 000B ÷ 0308 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] (Newline) ÷ [3.1] 
COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 000B ÷ 0308 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u000B\u0308\u0031\u0027", new String[] { "\u0031" });
-    // ÷ 000B ÷ 0031 ÷ 002C ÷ # ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 000B ÷ 0031 ÷ 002C ÷ # ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u000B\u0031\u002C", new String[] { "\u0031" });
-    // ÷ 000B ÷ 0308 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 000B ÷ 0308 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u000B\u0308\u0031\u002C", new String[] { "\u0031" });
-    // ÷ 000B ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 000B ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u000B\u0031\u002E\u2060", new String[] { "\u0031" });
-    // ÷ 000B ÷ 0308 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 000B ÷ 0308 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] <LINE TABULATION> (Newline) ÷ [3.1] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u000B\u0308\u0031\u002E\u2060", new String[] { "\u0031" });
-    // ÷ 3031 ÷ 0001 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    // ÷ 3031 ÷ 0001 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u0001", new String[] { "\u3031" });
-    // ÷ 3031 × 0308 ÷ 0001 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    // ÷ 3031 × 0308 ÷ 0001 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u0308\u0001", new String[] { "\u3031\u0308" });
-    // ÷ 3031 ÷ 000D ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [3.2] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    // ÷ 3031 ÷ 000D ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [3.2] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\r", new String[] { "\u3031" });
-    // ÷ 3031 × 0308 ÷ 000D ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    // ÷ 3031 × 0308 ÷ 000D ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u0308\r", new String[] { "\u3031\u0308" });
-    // ÷ 3031 ÷ 000A ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [3.2] <LINE FEED (LF)> (LF) ÷ [0.3]
+    // ÷ 3031 ÷ 000A ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [3.2] <LINE FEED (LF)> (LF) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\n", new String[] { "\u3031" });
-    // ÷ 3031 × 0308 ÷ 000A ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] <LINE FEED (LF)> (LF) ÷ [0.3]
+    // ÷ 3031 × 0308 ÷ 000A ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] <LINE FEED (LF)> (LF) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u0308\n", new String[] { "\u3031\u0308" });
-    // ÷ 3031 ÷ 000B ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [3.2] <LINE TABULATION> (Newline) ÷ [0.3]
+    // ÷ 3031 ÷ 000B ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [3.2] <LINE TABULATION> (Newline) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u000B", new String[] { "\u3031" });
-    // ÷ 3031 × 0308 ÷ 000B ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] <LINE TABULATION> (Newline) ÷ [0.3]
+    // ÷ 3031 × 0308 ÷ 000B ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] <LINE TABULATION> (Newline) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u0308\u000B", new String[] { "\u3031\u0308" });
-    // ÷ 3031 × 3031 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [13.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    // ÷ 3031 × 3031 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [13.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u3031", new String[] { "\u3031\u3031" });
-    // ÷ 3031 × 0308 × 3031 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    // ÷ 3031 × 0308 × 3031 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u0308\u3031", new String[] { "\u3031\u0308\u3031" });
-    // ÷ 3031 ÷ 0041 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    // ÷ 3031 ÷ 0041 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u0041", new String[] { "\u3031", "\u0041" });
-    // ÷ 3031 × 0308 ÷ 0041 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    // ÷ 3031 × 0308 ÷ 0041 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u0308\u0041", new String[] { "\u3031\u0308", "\u0041" });
-    // ÷ 3031 ÷ 003A ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 3031 ÷ 003A ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u003A", new String[] { "\u3031" });
-    // ÷ 3031 × 0308 ÷ 003A ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 3031 × 0308 ÷ 003A ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u0308\u003A", new String[] { "\u3031\u0308" });
-    // ÷ 3031 ÷ 002C ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 3031 ÷ 002C ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u002C", new String[] { "\u3031" });
-    // ÷ 3031 × 0308 ÷ 002C ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 3031 × 0308 ÷ 002C ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u0308\u002C", new String[] { "\u3031\u0308" });
-    // ÷ 3031 ÷ 0027 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 3031 ÷ 0027 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u0027", new String[] { "\u3031" });
-    // ÷ 3031 × 0308 ÷ 0027 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 3031 × 0308 ÷ 0027 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u0308\u0027", new String[] { "\u3031\u0308" });
-    // ÷ 3031 ÷ 0030 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    // ÷ 3031 ÷ 0030 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u0030", new String[] { "\u3031", "\u0030" });
-    // ÷ 3031 × 0308 ÷ 0030 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    // ÷ 3031 × 0308 ÷ 0030 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u0308\u0030", new String[] { "\u3031\u0308", "\u0030" });
-    // ÷ 3031 × 005F ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [13.1] LOW LINE (ExtendNumLet) ÷ [0.3]
+    // ÷ 3031 × 005F ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [13.1] LOW LINE (ExtendNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u005F", new String[] { "\u3031\u005F" });
-    // ÷ 3031 × 0308 × 005F ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.1] LOW LINE (ExtendNumLet) ÷ [0.3]
+    // ÷ 3031 × 0308 × 005F ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.1] LOW LINE (ExtendNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u0308\u005F", new String[] { "\u3031\u0308\u005F" });
-    // ÷ 3031 × 00AD ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    // ÷ 3031 × 00AD ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u00AD", new String[] { "\u3031\u00AD" });
-    // ÷ 3031 × 0308 × 00AD ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    // ÷ 3031 × 0308 × 00AD ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u0308\u00AD", new String[] { "\u3031\u0308\u00AD" });
-    // ÷ 3031 × 0300 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    // ÷ 3031 × 0300 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u0300", new String[] { "\u3031\u0300" });
-    // ÷ 3031 × 0308 × 0300 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    // ÷ 3031 × 0308 × 0300 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u0308\u0300", new String[] { "\u3031\u0308\u0300" });
-    // ÷ 3031 ÷ 0061 × 2060 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 3031 ÷ 0061 × 2060 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u0061\u2060", new String[] { "\u3031", "\u0061\u2060" });
-    // ÷ 3031 × 0308 ÷ 0061 × 2060 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 3031 × 0308 ÷ 0061 × 2060 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u0308\u0061\u2060", new String[] { "\u3031\u0308", "\u0061\u2060" });
-    // ÷ 3031 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 3031 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u0061\u003A", new String[] { "\u3031", "\u0061" });
-    // ÷ 3031 × 0308 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 3031 × 0308 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u0308\u0061\u003A", new String[] { "\u3031\u0308", "\u0061" });
-    // ÷ 3031 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 3031 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u0061\u0027", new String[] { "\u3031", "\u0061" });
-    // ÷ 3031 × 0308 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 3031 × 0308 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u0308\u0061\u0027", new String[] { "\u3031\u0308", "\u0061" });
-    // ÷ 3031 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 3031 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u0061\u0027\u2060", new String[] { "\u3031", "\u0061" });
-    // ÷ 3031 × 0308 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 3031 × 0308 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u0308\u0061\u0027\u2060", new String[] { "\u3031\u0308", "\u0061" });
-    // ÷ 3031 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 3031 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u0061\u002C", new String[] { "\u3031", "\u0061" });
-    // ÷ 3031 × 0308 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 3031 × 0308 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u0308\u0061\u002C", new String[] { "\u3031\u0308", "\u0061" });
-    // ÷ 3031 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 3031 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u0031\u003A", new String[] { "\u3031", "\u0031" });
-    // ÷ 3031 × 0308 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 3031 × 0308 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u0308\u0031\u003A", new String[] { "\u3031\u0308", "\u0031" });
-    // ÷ 3031 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 3031 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u0031\u0027", new String[] { "\u3031", "\u0031" });
-    // ÷ 3031 × 0308 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 3031 × 0308 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u0308\u0031\u0027", new String[] { "\u3031\u0308", "\u0031" });
-    // ÷ 3031 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 3031 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u0031\u002C", new String[] { "\u3031", "\u0031" });
-    // ÷ 3031 × 0308 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 3031 × 0308 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u0308\u0031\u002C", new String[] { "\u3031\u0308", "\u0031" });
-    // ÷ 3031 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 3031 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u0031\u002E\u2060", new String[] { "\u3031", "\u0031" });
-    // ÷ 3031 × 0308 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 3031 × 0308 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] VERTICAL KANA REPEAT MARK (Katakana) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u3031\u0308\u0031\u002E\u2060", new String[] { "\u3031\u0308", "\u0031" });
-    // ÷ 0041 ÷ 0001 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    // ÷ 0041 ÷ 0001 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u0001", new String[] { "\u0041" });
-    // ÷ 0041 × 0308 ÷ 0001 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    // ÷ 0041 × 0308 ÷ 0001 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u0308\u0001", new String[] { "\u0041\u0308" });
-    // ÷ 0041 ÷ 000D ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) ÷ [3.2] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    // ÷ 0041 ÷ 000D ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) ÷ [3.2] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\r", new String[] { "\u0041" });
-    // ÷ 0041 × 0308 ÷ 000D ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    // ÷ 0041 × 0308 ÷ 000D ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u0308\r", new String[] { "\u0041\u0308" });
-    // ÷ 0041 ÷ 000A ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) ÷ [3.2] <LINE FEED (LF)> (LF) ÷ [0.3]
+    // ÷ 0041 ÷ 000A ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) ÷ [3.2] <LINE FEED (LF)> (LF) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\n", new String[] { "\u0041" });
-    // ÷ 0041 × 0308 ÷ 000A ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] <LINE FEED (LF)> (LF) ÷ [0.3]
+    // ÷ 0041 × 0308 ÷ 000A ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] <LINE FEED (LF)> (LF) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u0308\n", new String[] { "\u0041\u0308" });
-    // ÷ 0041 ÷ 000B ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) ÷ [3.2] <LINE TABULATION> (Newline) ÷ [0.3]
+    // ÷ 0041 ÷ 000B ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) ÷ [3.2] <LINE TABULATION> (Newline) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u000B", new String[] { "\u0041" });
-    // ÷ 0041 × 0308 ÷ 000B ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] <LINE TABULATION> (Newline) ÷ [0.3]
+    // ÷ 0041 × 0308 ÷ 000B ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] <LINE TABULATION> (Newline) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u0308\u000B", new String[] { "\u0041\u0308" });
-    // ÷ 0041 ÷ 3031 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    // ÷ 0041 ÷ 3031 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u3031", new String[] { "\u0041", "\u3031" });
-    // ÷ 0041 × 0308 ÷ 3031 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    // ÷ 0041 × 0308 ÷ 3031 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u0308\u3031", new String[] { "\u0041\u0308", "\u3031" });
-    // ÷ 0041 × 0041 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [5.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    // ÷ 0041 × 0041 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [5.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u0041", new String[] { "\u0041\u0041" });
-    // ÷ 0041 × 0308 × 0041 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [5.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    // ÷ 0041 × 0308 × 0041 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [5.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u0308\u0041", new String[] { "\u0041\u0308\u0041" });
-    // ÷ 0041 ÷ 003A ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 0041 ÷ 003A ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u003A", new String[] { "\u0041" });
-    // ÷ 0041 × 0308 ÷ 003A ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 0041 × 0308 ÷ 003A ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u0308\u003A", new String[] { "\u0041\u0308" });
-    // ÷ 0041 ÷ 002C ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 0041 ÷ 002C ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u002C", new String[] { "\u0041" });
-    // ÷ 0041 × 0308 ÷ 002C ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 0041 × 0308 ÷ 002C ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u0308\u002C", new String[] { "\u0041\u0308" });
-    // ÷ 0041 ÷ 0027 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 0041 ÷ 0027 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u0027", new String[] { "\u0041" });
-    // ÷ 0041 × 0308 ÷ 0027 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 0041 × 0308 ÷ 0027 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u0308\u0027", new String[] { "\u0041\u0308" });
-    // ÷ 0041 × 0030 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [9.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    // ÷ 0041 × 0030 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [9.0] DIGIT ZERO (Numeric) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u0030", new String[] { "\u0041\u0030" });
-    // ÷ 0041 × 0308 × 0030 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [9.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    // ÷ 0041 × 0308 × 0030 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [9.0] DIGIT ZERO (Numeric) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u0308\u0030", new String[] { "\u0041\u0308\u0030" });
-    // ÷ 0041 × 005F ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [13.1] LOW LINE (ExtendNumLet) ÷ [0.3]
+    // ÷ 0041 × 005F ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [13.1] LOW LINE (ExtendNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u005F", new String[] { "\u0041\u005F" });
-    // ÷ 0041 × 0308 × 005F ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.1] LOW LINE (ExtendNumLet) ÷ [0.3]
+    // ÷ 0041 × 0308 × 005F ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.1] LOW LINE (ExtendNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u0308\u005F", new String[] { "\u0041\u0308\u005F" });
-    // ÷ 0041 × 00AD ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    // ÷ 0041 × 00AD ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u00AD", new String[] { "\u0041\u00AD" });
-    // ÷ 0041 × 0308 × 00AD ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    // ÷ 0041 × 0308 × 00AD ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u0308\u00AD", new String[] { "\u0041\u0308\u00AD" });
-    // ÷ 0041 × 0300 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    // ÷ 0041 × 0300 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u0300", new String[] { "\u0041\u0300" });
-    // ÷ 0041 × 0308 × 0300 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    // ÷ 0041 × 0308 × 0300 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u0308\u0300", new String[] { "\u0041\u0308\u0300" });
-    // ÷ 0041 × 0061 × 2060 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [5.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 0041 × 0061 × 2060 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [5.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u0061\u2060", new String[] { "\u0041\u0061\u2060" });
-    // ÷ 0041 × 0308 × 0061 × 2060 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [5.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 0041 × 0308 × 0061 × 2060 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [5.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u0308\u0061\u2060", new String[] { "\u0041\u0308\u0061\u2060" });
-    // ÷ 0041 × 0061 ÷ 003A ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 0041 × 0061 ÷ 003A ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u0061\u003A", new String[] { "\u0041\u0061" });
-    // ÷ 0041 × 0308 × 0061 ÷ 003A ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 0041 × 0308 × 0061 ÷ 003A ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u0308\u0061\u003A", new String[] { "\u0041\u0308\u0061" });
-    // ÷ 0041 × 0061 ÷ 0027 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 0041 × 0061 ÷ 0027 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u0061\u0027", new String[] { "\u0041\u0061" });
-    // ÷ 0041 × 0308 × 0061 ÷ 0027 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 0041 × 0308 × 0061 ÷ 0027 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u0308\u0061\u0027", new String[] { "\u0041\u0308\u0061" });
-    // ÷ 0041 × 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 0041 × 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u0061\u0027\u2060", new String[] { "\u0041\u0061" });
-    // ÷ 0041 × 0308 × 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 0041 × 0308 × 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u0308\u0061\u0027\u2060", new String[] { "\u0041\u0308\u0061" });
-    // ÷ 0041 × 0061 ÷ 002C ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 0041 × 0061 ÷ 002C ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u0061\u002C", new String[] { "\u0041\u0061" });
-    // ÷ 0041 × 0308 × 0061 ÷ 002C ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 0041 × 0308 × 0061 ÷ 002C ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u0308\u0061\u002C", new String[] { "\u0041\u0308\u0061" });
-    // ÷ 0041 × 0031 ÷ 003A ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 0041 × 0031 ÷ 003A ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u0031\u003A", new String[] { "\u0041\u0031" });
-    // ÷ 0041 × 0308 × 0031 ÷ 003A ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 0041 × 0308 × 0031 ÷ 003A ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u0308\u0031\u003A", new String[] { "\u0041\u0308\u0031" });
-    // ÷ 0041 × 0031 ÷ 0027 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 0041 × 0031 ÷ 0027 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u0031\u0027", new String[] { "\u0041\u0031" });
-    // ÷ 0041 × 0308 × 0031 ÷ 0027 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 0041 × 0308 × 0031 ÷ 0027 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u0308\u0031\u0027", new String[] { "\u0041\u0308\u0031" });
-    // ÷ 0041 × 0031 ÷ 002C ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 0041 × 0031 ÷ 002C ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u0031\u002C", new String[] { "\u0041\u0031" });
-    // ÷ 0041 × 0308 × 0031 ÷ 002C ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 0041 × 0308 × 0031 ÷ 002C ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u0308\u0031\u002C", new String[] { "\u0041\u0308\u0031" });
-    // ÷ 0041 × 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 0041 × 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u0031\u002E\u2060", new String[] { "\u0041\u0031" });
-    // ÷ 0041 × 0308 × 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 0041 × 0308 × 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] LATIN CAPITAL LETTER A (ALetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0041\u0308\u0031\u002E\u2060", new String[] { "\u0041\u0308\u0031" });
-    // ÷ 003A ÷ 0001 ÷ # ÷ [0.2] COLON (MidLetter) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    // ÷ 003A ÷ 0001 ÷ # ÷ [0.2] COLON (MidLetter) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u0001", new String[] { });
-    // ÷ 003A × 0308 ÷ 0001 ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    // ÷ 003A × 0308 ÷ 0001 ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u0308\u0001", new String[] { });
-    // ÷ 003A ÷ 000D ÷ # ÷ [0.2] COLON (MidLetter) ÷ [3.2] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    // ÷ 003A ÷ 000D ÷ # ÷ [0.2] COLON (MidLetter) ÷ [3.2] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\r", new String[] { });
-    // ÷ 003A × 0308 ÷ 000D ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    // ÷ 003A × 0308 ÷ 000D ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u0308\r", new String[] { });
-    // ÷ 003A ÷ 000A ÷ # ÷ [0.2] COLON (MidLetter) ÷ [3.2] <LINE FEED (LF)> (LF) ÷ [0.3]
+    // ÷ 003A ÷ 000A ÷ # ÷ [0.2] COLON (MidLetter) ÷ [3.2] <LINE FEED (LF)> (LF) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\n", new String[] { });
-    // ÷ 003A × 0308 ÷ 000A ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] <LINE FEED (LF)> (LF) ÷ [0.3]
+    // ÷ 003A × 0308 ÷ 000A ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] <LINE FEED (LF)> (LF) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u0308\n", new String[] { });
-    // ÷ 003A ÷ 000B ÷ # ÷ [0.2] COLON (MidLetter) ÷ [3.2] <LINE TABULATION> (Newline) ÷ [0.3]
+    // ÷ 003A ÷ 000B ÷ # ÷ [0.2] COLON (MidLetter) ÷ [3.2] <LINE TABULATION> (Newline) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u000B", new String[] { });
-    // ÷ 003A × 0308 ÷ 000B ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] <LINE TABULATION> (Newline) ÷ [0.3]
+    // ÷ 003A × 0308 ÷ 000B ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] <LINE TABULATION> (Newline) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u0308\u000B", new String[] { });
-    // ÷ 003A ÷ 3031 ÷ # ÷ [0.2] COLON (MidLetter) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    // ÷ 003A ÷ 3031 ÷ # ÷ [0.2] COLON (MidLetter) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u3031", new String[] { "\u3031" });
-    // ÷ 003A × 0308 ÷ 3031 ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    // ÷ 003A × 0308 ÷ 3031 ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u0308\u3031", new String[] { "\u3031" });
-    // ÷ 003A ÷ 0041 ÷ # ÷ [0.2] COLON (MidLetter) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    // ÷ 003A ÷ 0041 ÷ # ÷ [0.2] COLON (MidLetter) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u0041", new String[] { "\u0041" });
-    // ÷ 003A × 0308 ÷ 0041 ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    // ÷ 003A × 0308 ÷ 0041 ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u0308\u0041", new String[] { "\u0041" });
-    // ÷ 003A ÷ 003A ÷ # ÷ [0.2] COLON (MidLetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 003A ÷ 003A ÷ # ÷ [0.2] COLON (MidLetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u003A", new String[] { });
-    // ÷ 003A × 0308 ÷ 003A ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 003A × 0308 ÷ 003A ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u0308\u003A", new String[] { });
-    // ÷ 003A ÷ 002C ÷ # ÷ [0.2] COLON (MidLetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 003A ÷ 002C ÷ # ÷ [0.2] COLON (MidLetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u002C", new String[] { });
-    // ÷ 003A × 0308 ÷ 002C ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 003A × 0308 ÷ 002C ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u0308\u002C", new String[] { });
-    // ÷ 003A ÷ 0027 ÷ # ÷ [0.2] COLON (MidLetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 003A ÷ 0027 ÷ # ÷ [0.2] COLON (MidLetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u0027", new String[] { });
-    // ÷ 003A × 0308 ÷ 0027 ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 003A × 0308 ÷ 0027 ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u0308\u0027", new String[] { });
-    // ÷ 003A ÷ 0030 ÷ # ÷ [0.2] COLON (MidLetter) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    // ÷ 003A ÷ 0030 ÷ # ÷ [0.2] COLON (MidLetter) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u0030", new String[] { "\u0030" });
-    // ÷ 003A × 0308 ÷ 0030 ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    // ÷ 003A × 0308 ÷ 0030 ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u0308\u0030", new String[] { "\u0030" });
-    // ÷ 003A ÷ 005F ÷ # ÷ [0.2] COLON (MidLetter) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    // ÷ 003A ÷ 005F ÷ # ÷ [0.2] COLON (MidLetter) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u005F", new String[] { });
-    // ÷ 003A × 0308 ÷ 005F ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    // ÷ 003A × 0308 ÷ 005F ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u0308\u005F", new String[] { });
-    // ÷ 003A × 00AD ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    // ÷ 003A × 00AD ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u00AD", new String[] { });
-    // ÷ 003A × 0308 × 00AD ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    // ÷ 003A × 0308 × 00AD ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u0308\u00AD", new String[] { });
-    // ÷ 003A × 0300 ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    // ÷ 003A × 0300 ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u0300", new String[] { });
-    // ÷ 003A × 0308 × 0300 ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    // ÷ 003A × 0308 × 0300 ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u0308\u0300", new String[] { });
-    // ÷ 003A ÷ 0061 × 2060 ÷ # ÷ [0.2] COLON (MidLetter) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 003A ÷ 0061 × 2060 ÷ # ÷ [0.2] COLON (MidLetter) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u0061\u2060", new String[] { "\u0061\u2060" });
-    // ÷ 003A × 0308 ÷ 0061 × 2060 ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 003A × 0308 ÷ 0061 × 2060 ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u0308\u0061\u2060", new String[] { "\u0061\u2060" });
-    // ÷ 003A ÷ 0061 ÷ 003A ÷ # ÷ [0.2] COLON (MidLetter) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 003A ÷ 0061 ÷ 003A ÷ # ÷ [0.2] COLON (MidLetter) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u0061\u003A", new String[] { "\u0061" });
-    // ÷ 003A × 0308 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 003A × 0308 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u0308\u0061\u003A", new String[] { "\u0061" });
-    // ÷ 003A ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] COLON (MidLetter) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 003A ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] COLON (MidLetter) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u0061\u0027", new String[] { "\u0061" });
-    // ÷ 003A × 0308 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 003A × 0308 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u0308\u0061\u0027", new String[] { "\u0061" });
-    // ÷ 003A ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] COLON (MidLetter) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 003A ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] COLON (MidLetter) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u0061\u0027\u2060", new String[] { "\u0061" });
-    // ÷ 003A × 0308 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 003A × 0308 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u0308\u0061\u0027\u2060", new String[] { "\u0061" });
-    // ÷ 003A ÷ 0061 ÷ 002C ÷ # ÷ [0.2] COLON (MidLetter) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 003A ÷ 0061 ÷ 002C ÷ # ÷ [0.2] COLON (MidLetter) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u0061\u002C", new String[] { "\u0061" });
-    // ÷ 003A × 0308 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 003A × 0308 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u0308\u0061\u002C", new String[] { "\u0061" });
-    // ÷ 003A ÷ 0031 ÷ 003A ÷ # ÷ [0.2] COLON (MidLetter) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 003A ÷ 0031 ÷ 003A ÷ # ÷ [0.2] COLON (MidLetter) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u0031\u003A", new String[] { "\u0031" });
-    // ÷ 003A × 0308 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 003A × 0308 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u0308\u0031\u003A", new String[] { "\u0031" });
-    // ÷ 003A ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] COLON (MidLetter) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 003A ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] COLON (MidLetter) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u0031\u0027", new String[] { "\u0031" });
-    // ÷ 003A × 0308 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 003A × 0308 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u0308\u0031\u0027", new String[] { "\u0031" });
-    // ÷ 003A ÷ 0031 ÷ 002C ÷ # ÷ [0.2] COLON (MidLetter) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 003A ÷ 0031 ÷ 002C ÷ # ÷ [0.2] COLON (MidLetter) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u0031\u002C", new String[] { "\u0031" });
-    // ÷ 003A × 0308 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 003A × 0308 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u0308\u0031\u002C", new String[] { "\u0031" });
-    // ÷ 003A ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] COLON (MidLetter) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 003A ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] COLON (MidLetter) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u0031\u002E\u2060", new String[] { "\u0031" });
-    // ÷ 003A × 0308 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 003A × 0308 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u003A\u0308\u0031\u002E\u2060", new String[] { "\u0031" });
-    // ÷ 002C ÷ 0001 ÷ # ÷ [0.2] COMMA (MidNum) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    // ÷ 002C ÷ 0001 ÷ # ÷ [0.2] COMMA (MidNum) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u0001", new String[] { });
-    // ÷ 002C × 0308 ÷ 0001 ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    // ÷ 002C × 0308 ÷ 0001 ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u0308\u0001", new String[] { });
-    // ÷ 002C ÷ 000D ÷ # ÷ [0.2] COMMA (MidNum) ÷ [3.2] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    // ÷ 002C ÷ 000D ÷ # ÷ [0.2] COMMA (MidNum) ÷ [3.2] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\r", new String[] { });
-    // ÷ 002C × 0308 ÷ 000D ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    // ÷ 002C × 0308 ÷ 000D ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u0308\r", new String[] { });
-    // ÷ 002C ÷ 000A ÷ # ÷ [0.2] COMMA (MidNum) ÷ [3.2] <LINE FEED (LF)> (LF) ÷ [0.3]
+    // ÷ 002C ÷ 000A ÷ # ÷ [0.2] COMMA (MidNum) ÷ [3.2] <LINE FEED (LF)> (LF) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\n", new String[] { });
-    // ÷ 002C × 0308 ÷ 000A ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] <LINE FEED (LF)> (LF) ÷ [0.3]
+    // ÷ 002C × 0308 ÷ 000A ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] <LINE FEED (LF)> (LF) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u0308\n", new String[] { });
-    // ÷ 002C ÷ 000B ÷ # ÷ [0.2] COMMA (MidNum) ÷ [3.2] <LINE TABULATION> (Newline) ÷ [0.3]
+    // ÷ 002C ÷ 000B ÷ # ÷ [0.2] COMMA (MidNum) ÷ [3.2] <LINE TABULATION> (Newline) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u000B", new String[] { });
-    // ÷ 002C × 0308 ÷ 000B ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] <LINE TABULATION> (Newline) ÷ [0.3]
+    // ÷ 002C × 0308 ÷ 000B ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] <LINE TABULATION> (Newline) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u0308\u000B", new String[] { });
-    // ÷ 002C ÷ 3031 ÷ # ÷ [0.2] COMMA (MidNum) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    // ÷ 002C ÷ 3031 ÷ # ÷ [0.2] COMMA (MidNum) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u3031", new String[] { "\u3031" });
-    // ÷ 002C × 0308 ÷ 3031 ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    // ÷ 002C × 0308 ÷ 3031 ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u0308\u3031", new String[] { "\u3031" });
-    // ÷ 002C ÷ 0041 ÷ # ÷ [0.2] COMMA (MidNum) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    // ÷ 002C ÷ 0041 ÷ # ÷ [0.2] COMMA (MidNum) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u0041", new String[] { "\u0041" });
-    // ÷ 002C × 0308 ÷ 0041 ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    // ÷ 002C × 0308 ÷ 0041 ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u0308\u0041", new String[] { "\u0041" });
-    // ÷ 002C ÷ 003A ÷ # ÷ [0.2] COMMA (MidNum) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 002C ÷ 003A ÷ # ÷ [0.2] COMMA (MidNum) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u003A", new String[] { });
-    // ÷ 002C × 0308 ÷ 003A ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 002C × 0308 ÷ 003A ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u0308\u003A", new String[] { });
-    // ÷ 002C ÷ 002C ÷ # ÷ [0.2] COMMA (MidNum) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 002C ÷ 002C ÷ # ÷ [0.2] COMMA (MidNum) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u002C", new String[] { });
-    // ÷ 002C × 0308 ÷ 002C ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 002C × 0308 ÷ 002C ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u0308\u002C", new String[] { });
-    // ÷ 002C ÷ 0027 ÷ # ÷ [0.2] COMMA (MidNum) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 002C ÷ 0027 ÷ # ÷ [0.2] COMMA (MidNum) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u0027", new String[] { });
-    // ÷ 002C × 0308 ÷ 0027 ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 002C × 0308 ÷ 0027 ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u0308\u0027", new String[] { });
-    // ÷ 002C ÷ 0030 ÷ # ÷ [0.2] COMMA (MidNum) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    // ÷ 002C ÷ 0030 ÷ # ÷ [0.2] COMMA (MidNum) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u0030", new String[] { "\u0030" });
-    // ÷ 002C × 0308 ÷ 0030 ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    // ÷ 002C × 0308 ÷ 0030 ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u0308\u0030", new String[] { "\u0030" });
-    // ÷ 002C ÷ 005F ÷ # ÷ [0.2] COMMA (MidNum) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    // ÷ 002C ÷ 005F ÷ # ÷ [0.2] COMMA (MidNum) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u005F", new String[] { });
-    // ÷ 002C × 0308 ÷ 005F ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    // ÷ 002C × 0308 ÷ 005F ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u0308\u005F", new String[] { });
-    // ÷ 002C × 00AD ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    // ÷ 002C × 00AD ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u00AD", new String[] { });
-    // ÷ 002C × 0308 × 00AD ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    // ÷ 002C × 0308 × 00AD ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u0308\u00AD", new String[] { });
-    // ÷ 002C × 0300 ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    // ÷ 002C × 0300 ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u0300", new String[] { });
-    // ÷ 002C × 0308 × 0300 ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    // ÷ 002C × 0308 × 0300 ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u0308\u0300", new String[] { });
-    // ÷ 002C ÷ 0061 × 2060 ÷ # ÷ [0.2] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 002C ÷ 0061 × 2060 ÷ # ÷ [0.2] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u0061\u2060", new String[] { "\u0061\u2060" });
-    // ÷ 002C × 0308 ÷ 0061 × 2060 ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 002C × 0308 ÷ 0061 × 2060 ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u0308\u0061\u2060", new String[] { "\u0061\u2060" });
-    // ÷ 002C ÷ 0061 ÷ 003A ÷ # ÷ [0.2] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 002C ÷ 0061 ÷ 003A ÷ # ÷ [0.2] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u0061\u003A", new String[] { "\u0061" });
-    // ÷ 002C × 0308 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 002C × 0308 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u0308\u0061\u003A", new String[] { "\u0061" });
-    // ÷ 002C ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 002C ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u0061\u0027", new String[] { "\u0061" });
-    // ÷ 002C × 0308 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 002C × 0308 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u0308\u0061\u0027", new String[] { "\u0061" });
-    // ÷ 002C ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 002C ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u0061\u0027\u2060", new String[] { "\u0061" });
-    // ÷ 002C × 0308 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 002C × 0308 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u0308\u0061\u0027\u2060", new String[] { "\u0061" });
-    // ÷ 002C ÷ 0061 ÷ 002C ÷ # ÷ [0.2] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 002C ÷ 0061 ÷ 002C ÷ # ÷ [0.2] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u0061\u002C", new String[] { "\u0061" });
-    // ÷ 002C × 0308 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 002C × 0308 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u0308\u0061\u002C", new String[] { "\u0061" });
-    // ÷ 002C ÷ 0031 ÷ 003A ÷ # ÷ [0.2] COMMA (MidNum) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 002C ÷ 0031 ÷ 003A ÷ # ÷ [0.2] COMMA (MidNum) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u0031\u003A", new String[] { "\u0031" });
-    // ÷ 002C × 0308 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 002C × 0308 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u0308\u0031\u003A", new String[] { "\u0031" });
-    // ÷ 002C ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] COMMA (MidNum) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 002C ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] COMMA (MidNum) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u0031\u0027", new String[] { "\u0031" });
-    // ÷ 002C × 0308 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 002C × 0308 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u0308\u0031\u0027", new String[] { "\u0031" });
-    // ÷ 002C ÷ 0031 ÷ 002C ÷ # ÷ [0.2] COMMA (MidNum) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 002C ÷ 0031 ÷ 002C ÷ # ÷ [0.2] COMMA (MidNum) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u0031\u002C", new String[] { "\u0031" });
-    // ÷ 002C × 0308 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 002C × 0308 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u0308\u0031\u002C", new String[] { "\u0031" });
-    // ÷ 002C ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] COMMA (MidNum) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 002C ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] COMMA (MidNum) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u0031\u002E\u2060", new String[] { "\u0031" });
-    // ÷ 002C × 0308 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 002C × 0308 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u002C\u0308\u0031\u002E\u2060", new String[] { "\u0031" });
-    // ÷ 0027 ÷ 0001 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    // ÷ 0027 ÷ 0001 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0027\u0001", new String[] { });
-    // ÷ 0027 × 0308 ÷ 0001 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
+    // ÷ 0027 × 0308 ÷ 0001 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] <START OF HEADING> (Other) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0027\u0308\u0001", new String[] { });
-    // ÷ 0027 ÷ 000D ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [3.2] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    // ÷ 0027 ÷ 000D ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [3.2] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0027\r", new String[] { });
-    // ÷ 0027 × 0308 ÷ 000D ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
+    // ÷ 0027 × 0308 ÷ 000D ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0027\u0308\r", new String[] { });
-    // ÷ 0027 ÷ 000A ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [3.2] <LINE FEED (LF)> (LF) ÷ [0.3]
+    // ÷ 0027 ÷ 000A ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [3.2] <LINE FEED (LF)> (LF) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0027\n", new String[] { });
-    // ÷ 0027 × 0308 ÷ 000A ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] <LINE FEED (LF)> (LF) ÷ [0.3]
+    // ÷ 0027 × 0308 ÷ 000A ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] <LINE FEED (LF)> (LF) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0027\u0308\n", new String[] { });
-    // ÷ 0027 ÷ 000B ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [3.2] <LINE TABULATION> (Newline) ÷ [0.3]
+    // ÷ 0027 ÷ 000B ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [3.2] <LINE TABULATION> (Newline) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0027\u000B", new String[] { });
-    // ÷ 0027 × 0308 ÷ 000B ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] <LINE TABULATION> (Newline) ÷ [0.3]
+    // ÷ 0027 × 0308 ÷ 000B ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] <LINE TABULATION> (Newline) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0027\u0308\u000B", new String[] { });
-    // ÷ 0027 ÷ 3031 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    // ÷ 0027 ÷ 3031 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0027\u3031", new String[] { "\u3031" });
-    // ÷ 0027 × 0308 ÷ 3031 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    // ÷ 0027 × 0308 ÷ 3031 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0027\u0308\u3031", new String[] { "\u3031" });
-    // ÷ 0027 ÷ 0041 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    // ÷ 0027 ÷ 0041 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0027\u0041", new String[] { "\u0041" });
-    // ÷ 0027 × 0308 ÷ 0041 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    // ÷ 0027 × 0308 ÷ 0041 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0027\u0308\u0041", new String[] { "\u0041" });
-    // ÷ 0027 ÷ 003A ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 0027 ÷ 003A ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0027\u003A", new String[] { });
-    // ÷ 0027 × 0308 ÷ 003A ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 0027 × 0308 ÷ 003A ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0027\u0308\u003A", new String[] { });
-    // ÷ 0027 ÷ 002C ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 0027 ÷ 002C ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0027\u002C", new String[] { });
-    // ÷ 0027 × 0308 ÷ 002C ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 0027 × 0308 ÷ 002C ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0027\u0308\u002C", new String[] { });
-    // ÷ 0027 ÷ 0027 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 0027 ÷ 0027 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0027\u0027", new String[] { });
-    // ÷ 0027 × 0308 ÷ 0027 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 0027 × 0308 ÷ 0027 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0027\u0308\u0027", new String[] { });
-    // ÷ 0027 ÷ 0030 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    // ÷ 0027 ÷ 0030 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0027\u0030", new String[] { "\u0030" });
-    // ÷ 0027 × 0308 ÷ 0030 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    // ÷ 0027 × 0308 ÷ 0030 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0027\u0308\u0030", new String[] { "\u0030" });
-    // ÷ 0027 ÷ 005F ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    // ÷ 0027 ÷ 005F ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0027\u005F", new String[] { });
-    // ÷ 0027 × 0308 ÷ 005F ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    // ÷ 0027 × 0308 ÷ 005F ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0027\u0308\u005F", new String[] { });
-    // ÷ 0027 × 00AD ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    // ÷ 0027 × 00AD ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0027\u00AD", new String[] { });
-    // ÷ 0027 × 0308 × 00AD ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    // ÷ 0027 × 0308 × 00AD ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0027\u0308\u00AD", new String[] { });
-    // ÷ 0027 × 0300 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    // ÷ 0027 × 0300 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0027\u0300", new String[] { });
-    // ÷ 0027 × 0308 × 0300 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    // ÷ 0027 × 0308 × 0300 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0027\u0308\u0300", new String[] { });
-    // ÷ 0027 ÷ 0061 × 2060 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 0027 ÷ 0061 × 2060 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0027\u0061\u2060", new String[] { "\u0061\u2060" });
-    // ÷ 0027 × 0308 ÷ 0061 × 2060 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 0027 × 0308 ÷ 0061 × 2060 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0027\u0308\u0061\u2060", new String[] { "\u0061\u2060" });
-    // ÷ 0027 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 0027 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0027\u0061\u003A", new String[] { "\u0061" });
-    // ÷ 0027 × 0308 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 0027 × 0308 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0027\u0308\u0061\u003A", new String[] { "\u0061" });
-    // ÷ 0027 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 0027 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0027\u0061\u0027", new String[] { "\u0061" });
-    // ÷ 0027 × 0308 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 0027 × 0308 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷
[999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0027\u0308\u0061\u0027", new String[] { "\u0061" }); - // ÷ 0027 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0027 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0027\u0061\u0027\u2060", new String[] { "\u0061" }); - // ÷ 0027 × 0308 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0027 × 0308 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0027\u0308\u0061\u0027\u2060", new String[] { "\u0061" }); - // ÷ 0027 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0027 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0027\u0061\u002C", new String[] { "\u0061" }); - // ÷ 0027 × 0308 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0027 × 0308 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0027\u0308\u0061\u002C", new String[] { "\u0061" }); - // ÷ 0027 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0027 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0027\u0031\u003A", new String[] { "\u0031" }); - // ÷ 0027 × 0308 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0027 × 0308 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0027\u0308\u0031\u003A", new String[] { "\u0031" }); - // ÷ 0027 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0027 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0027\u0031\u0027", new String[] { "\u0031" }); - // ÷ 0027 × 0308 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0027 × 0308 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0027\u0308\u0031\u0027", new String[] { "\u0031" }); - // ÷ 0027 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] APOSTROPHE 
-    // ÷ 0027 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 0027 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0027\u0031\u002C", new String[] { "\u0031" });
-    // ÷ 0027 × 0308 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 0027 × 0308 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0027\u0308\u0031\u002C", new String[] { "\u0031" });
-    // ÷ 0027 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 0027 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0027\u0031\u002E\u2060", new String[] { "\u0031" });
-    // ÷ 0027 × 0308 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 0027 × 0308 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0027\u0308\u0031\u002E\u2060", new String[] { "\u0031" });
-    // ÷ 0030 ÷ 0001 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) ÷ [999.0] (Other) ÷ [0.3]
+    // ÷ 0030 ÷ 0001 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) ÷ [999.0] (Other) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u0001", new String[] { "\u0030" });
-    // ÷ 0030 × 0308 ÷ 0001 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] (Other) ÷ [0.3]
+    // ÷ 0030 × 0308 ÷ 0001 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] (Other) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u0308\u0001", new String[] { "\u0030\u0308" });
-    // ÷ 0030 ÷ 000D ÷ # ÷ [0.2] DIGIT ZERO (Numeric) ÷ [3.2] (CR) ÷ [0.3]
+    // ÷ 0030 ÷ 000D ÷ # ÷ [0.2] DIGIT ZERO (Numeric) ÷ [3.2] (CR) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\r", new String[] { "\u0030" });
-    // ÷ 0030 × 0308 ÷ 000D ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (CR) ÷ [0.3]
+    // ÷ 0030 × 0308 ÷ 000D ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (CR) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u0308\r", new String[] { "\u0030\u0308" });
-    // ÷ 0030 ÷ 000A ÷ # ÷ [0.2] DIGIT ZERO (Numeric) ÷ [3.2] (LF) ÷ [0.3]
+    // ÷ 0030 ÷ 000A ÷ # ÷ [0.2] DIGIT ZERO (Numeric) ÷ [3.2] (LF) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\n", new String[] { "\u0030" });
-    // ÷ 0030 × 0308 ÷ 000A ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (LF) ÷ [0.3]
+    // ÷ 0030 × 0308 ÷ 000A ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (LF) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u0308\n", new String[] { "\u0030\u0308" });
-    // ÷ 0030 ÷ 000B ÷ # ÷ [0.2] DIGIT ZERO (Numeric) ÷ [3.2] (Newline) ÷ [0.3]
+    // ÷ 0030 ÷ 000B ÷ # ÷ [0.2] DIGIT ZERO (Numeric) ÷ [3.2] (Newline) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u000B", new String[] { "\u0030" });
-    // ÷ 0030 × 0308 ÷ 000B ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (Newline) ÷ [0.3]
+    // ÷ 0030 × 0308 ÷ 000B ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (Newline) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u0308\u000B", new String[] { "\u0030\u0308" });
-    // ÷ 0030 ÷ 3031 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    // ÷ 0030 ÷ 3031 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u3031", new String[] { "\u0030", "\u3031" });
-    // ÷ 0030 × 0308 ÷ 3031 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    // ÷ 0030 × 0308 ÷ 3031 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u0308\u3031", new String[] { "\u0030\u0308", "\u3031" });
-    // ÷ 0030 × 0041 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [10.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    // ÷ 0030 × 0041 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [10.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u0041", new String[] { "\u0030\u0041" });
-    // ÷ 0030 × 0308 × 0041 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [10.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    // ÷ 0030 × 0308 × 0041 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [10.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u0308\u0041", new String[] { "\u0030\u0308\u0041" });
-    // ÷ 0030 ÷ 003A ÷ # ÷ [0.2] DIGIT ZERO (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 0030 ÷ 003A ÷ # ÷ [0.2] DIGIT ZERO (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u003A", new String[] { "\u0030" });
-    // ÷ 0030 × 0308 ÷ 003A ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 0030 × 0308 ÷ 003A ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u0308\u003A", new String[] { "\u0030\u0308" });
-    // ÷ 0030 ÷ 002C ÷ # ÷ [0.2] DIGIT ZERO (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 0030 ÷ 002C ÷ # ÷ [0.2] DIGIT ZERO (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u002C", new String[] { "\u0030" });
-    // ÷ 0030 × 0308 ÷ 002C ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 0030 × 0308 ÷ 002C ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u0308\u002C", new String[] { "\u0030\u0308" });
-    // ÷ 0030 ÷ 0027 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 0030 ÷ 0027 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u0027", new String[] { "\u0030" });
-    // ÷ 0030 × 0308 ÷ 0027 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 0030 × 0308 ÷ 0027 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u0308\u0027", new String[] { "\u0030\u0308" });
-    // ÷ 0030 × 0030 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [8.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    // ÷ 0030 × 0030 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [8.0] DIGIT ZERO (Numeric) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u0030", new String[] { "\u0030\u0030" });
-    // ÷ 0030 × 0308 × 0030 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [8.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    // ÷ 0030 × 0308 × 0030 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [8.0] DIGIT ZERO (Numeric) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u0308\u0030", new String[] { "\u0030\u0308\u0030" });
-    // ÷ 0030 × 005F ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [13.1] LOW LINE (ExtendNumLet) ÷ [0.3]
+    // ÷ 0030 × 005F ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [13.1] LOW LINE (ExtendNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u005F", new String[] { "\u0030\u005F" });
-    // ÷ 0030 × 0308 × 005F ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.1] LOW LINE (ExtendNumLet) ÷ [0.3]
+    // ÷ 0030 × 0308 × 005F ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.1] LOW LINE (ExtendNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u0308\u005F", new String[] { "\u0030\u0308\u005F" });
-    // ÷ 0030 × 00AD ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    // ÷ 0030 × 00AD ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u00AD", new String[] { "\u0030\u00AD" });
-    // ÷ 0030 × 0308 × 00AD ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    // ÷ 0030 × 0308 × 00AD ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u0308\u00AD", new String[] { "\u0030\u0308\u00AD" });
-    // ÷ 0030 × 0300 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    // ÷ 0030 × 0300 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u0300", new String[] { "\u0030\u0300" });
-    // ÷ 0030 × 0308 × 0300 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    // ÷ 0030 × 0308 × 0300 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u0308\u0300", new String[] { "\u0030\u0308\u0300" });
-    // ÷ 0030 × 0061 × 2060 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [10.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 0030 × 0061 × 2060 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [10.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u0061\u2060", new String[] { "\u0030\u0061\u2060" });
-    // ÷ 0030 × 0308 × 0061 × 2060 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [10.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 0030 × 0308 × 0061 × 2060 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [10.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u0308\u0061\u2060", new String[] { "\u0030\u0308\u0061\u2060" });
-    // ÷ 0030 × 0061 ÷ 003A ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [10.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 0030 × 0061 ÷ 003A ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [10.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u0061\u003A", new String[] { "\u0030\u0061" });
-    // ÷ 0030 × 0308 × 0061 ÷ 003A ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [10.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 0030 × 0308 × 0061 ÷ 003A ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [10.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u0308\u0061\u003A", new String[] { "\u0030\u0308\u0061" });
-    // ÷ 0030 × 0061 ÷ 0027 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [10.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 0030 × 0061 ÷ 0027 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [10.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u0061\u0027", new String[] { "\u0030\u0061" });
-    // ÷ 0030 × 0308 × 0061 ÷ 0027 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [10.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 0030 × 0308 × 0061 ÷ 0027 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [10.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u0308\u0061\u0027", new String[] { "\u0030\u0308\u0061" });
-    // ÷ 0030 × 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [10.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 0030 × 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [10.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u0061\u0027\u2060", new String[] { "\u0030\u0061" });
-    // ÷ 0030 × 0308 × 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [10.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 0030 × 0308 × 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [10.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u0308\u0061\u0027\u2060", new String[] { "\u0030\u0308\u0061" });
-    // ÷ 0030 × 0061 ÷ 002C ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [10.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 0030 × 0061 ÷ 002C ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [10.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u0061\u002C", new String[] { "\u0030\u0061" });
-    // ÷ 0030 × 0308 × 0061 ÷ 002C ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [10.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 0030 × 0308 × 0061 ÷ 002C ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [10.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u0308\u0061\u002C", new String[] { "\u0030\u0308\u0061" });
-    // ÷ 0030 × 0031 ÷ 003A ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [8.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 0030 × 0031 ÷ 003A ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [8.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u0031\u003A", new String[] { "\u0030\u0031" });
-    // ÷ 0030 × 0308 × 0031 ÷ 003A ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [8.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 0030 × 0308 × 0031 ÷ 003A ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [8.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u0308\u0031\u003A", new String[] { "\u0030\u0308\u0031" });
-    // ÷ 0030 × 0031 ÷ 0027 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [8.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 0030 × 0031 ÷ 0027 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [8.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u0031\u0027", new String[] { "\u0030\u0031" });
-    // ÷ 0030 × 0308 × 0031 ÷ 0027 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [8.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 0030 × 0308 × 0031 ÷ 0027 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [8.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u0308\u0031\u0027", new String[] { "\u0030\u0308\u0031" });
-    // ÷ 0030 × 0031 ÷ 002C ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [8.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 0030 × 0031 ÷ 002C ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [8.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u0031\u002C", new String[] { "\u0030\u0031" });
-    // ÷ 0030 × 0308 × 0031 ÷ 002C ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [8.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 0030 × 0308 × 0031 ÷ 002C ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [8.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u0308\u0031\u002C", new String[] { "\u0030\u0308\u0031" });
-    // ÷ 0030 × 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [8.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 0030 × 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [8.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u0031\u002E\u2060", new String[] { "\u0030\u0031" });
-    // ÷ 0030 × 0308 × 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [8.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 0030 × 0308 × 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] DIGIT ZERO (Numeric) × [4.0] COMBINING DIAERESIS (Extend_FE) × [8.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0030\u0308\u0031\u002E\u2060", new String[] { "\u0030\u0308\u0031" });
-    // ÷ 005F ÷ 0001 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) ÷ [999.0] (Other) ÷ [0.3]
+    // ÷ 005F ÷ 0001 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) ÷ [999.0] (Other) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u0001", new String[] { });
-    // ÷ 005F × 0308 ÷ 0001 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] (Other) ÷ [0.3]
+    // ÷ 005F × 0308 ÷ 0001 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] (Other) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u0308\u0001", new String[] { });
-    // ÷ 005F ÷ 000D ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) ÷ [3.2] (CR) ÷ [0.3]
+    // ÷ 005F ÷ 000D ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) ÷ [3.2] (CR) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\r", new String[] { });
-    // ÷ 005F × 0308 ÷ 000D ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (CR) ÷ [0.3]
+    // ÷ 005F × 0308 ÷ 000D ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (CR) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u0308\r", new String[] { });
-    // ÷ 005F ÷ 000A ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) ÷ [3.2] (LF) ÷ [0.3]
+    // ÷ 005F ÷ 000A ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) ÷ [3.2] (LF) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\n", new String[] { });
-    // ÷ 005F × 0308 ÷ 000A ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (LF) ÷ [0.3]
+    // ÷ 005F × 0308 ÷ 000A ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (LF) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u0308\n", new String[] { });
-    // ÷ 005F ÷ 000B ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) ÷ [3.2] (Newline) ÷ [0.3]
+    // ÷ 005F ÷ 000B ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) ÷ [3.2] (Newline) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u000B", new String[] { });
-    // ÷ 005F × 0308 ÷ 000B ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (Newline) ÷ [0.3]
+    // ÷ 005F × 0308 ÷ 000B ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (Newline) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u0308\u000B", new String[] { });
-    // ÷ 005F × 3031 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [13.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    // ÷ 005F × 3031 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [13.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u3031", new String[] { "\u005F\u3031" });
-    // ÷ 005F × 0308 × 3031 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    // ÷ 005F × 0308 × 3031 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.2] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u0308\u3031", new String[] { "\u005F\u0308\u3031" });
-    // ÷ 005F × 0041 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [13.2] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    // ÷ 005F × 0041 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [13.2] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u0041", new String[] { "\u005F\u0041" });
-    // ÷ 005F × 0308 × 0041 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.2] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    // ÷ 005F × 0308 × 0041 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.2] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u0308\u0041", new String[] { "\u005F\u0308\u0041" });
-    // ÷ 005F ÷ 003A ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 005F ÷ 003A ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u003A", new String[] { });
-    // ÷ 005F × 0308 ÷ 003A ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 005F × 0308 ÷ 003A ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u0308\u003A", new String[] { });
-    // ÷ 005F ÷ 002C ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 005F ÷ 002C ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u002C", new String[] { });
-    // ÷ 005F × 0308 ÷ 002C ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 005F × 0308 ÷ 002C ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u0308\u002C", new String[] { });
-    // ÷ 005F ÷ 0027 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 005F ÷ 0027 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u0027", new String[] { });
-    // ÷ 005F × 0308 ÷ 0027 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 005F × 0308 ÷ 0027 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u0308\u0027", new String[] { });
-    // ÷ 005F × 0030 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [13.2] DIGIT ZERO (Numeric) ÷ [0.3]
+    // ÷ 005F × 0030 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [13.2] DIGIT ZERO (Numeric) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u0030", new String[] { "\u005F\u0030" });
-    // ÷ 005F × 0308 × 0030 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.2] DIGIT ZERO (Numeric) ÷ [0.3]
+    // ÷ 005F × 0308 × 0030 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.2] DIGIT ZERO (Numeric) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u0308\u0030", new String[] { "\u005F\u0308\u0030" });
-    // ÷ 005F × 005F ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [13.1] LOW LINE (ExtendNumLet) ÷ [0.3]
+    // ÷ 005F × 005F ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [13.1] LOW LINE (ExtendNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u005F", new String[] { });
-    // ÷ 005F × 0308 × 005F ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.1] LOW LINE (ExtendNumLet) ÷ [0.3]
+    // ÷ 005F × 0308 × 005F ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.1] LOW LINE (ExtendNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u0308\u005F", new String[] { });
-    // ÷ 005F × 00AD ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    // ÷ 005F × 00AD ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u00AD", new String[] { });
-    // ÷ 005F × 0308 × 00AD ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    // ÷ 005F × 0308 × 00AD ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u0308\u00AD", new String[] { });
-    // ÷ 005F × 0300 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    // ÷ 005F × 0300 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u0300", new String[] { });
-    // ÷ 005F × 0308 × 0300 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    // ÷ 005F × 0308 × 0300 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u0308\u0300", new String[] { });
-    // ÷ 005F × 0061 × 2060 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [13.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 005F × 0061 × 2060 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [13.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u0061\u2060", new String[] { "\u005F\u0061\u2060" });
-    // ÷ 005F × 0308 × 0061 × 2060 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 005F × 0308 × 0061 × 2060 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u0308\u0061\u2060", new String[] { "\u005F\u0308\u0061\u2060" });
-    // ÷ 005F × 0061 ÷ 003A ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [13.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 005F × 0061 ÷ 003A ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [13.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u0061\u003A", new String[] { "\u005F\u0061" });
-    // ÷ 005F × 0308 × 0061 ÷ 003A ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 005F × 0308 × 0061 ÷ 003A ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u0308\u0061\u003A", new String[] { "\u005F\u0308\u0061" });
-    // ÷ 005F × 0061 ÷ 0027 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [13.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 005F × 0061 ÷ 0027 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [13.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u0061\u0027", new String[] { "\u005F\u0061" });
-    // ÷ 005F × 0308 × 0061 ÷ 0027 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 005F × 0308 × 0061 ÷ 0027 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u0308\u0061\u0027", new String[] { "\u005F\u0308\u0061" });
-    // ÷ 005F × 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [13.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 005F × 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [13.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u0061\u0027\u2060", new String[] { "\u005F\u0061" });
-    // ÷ 005F × 0308 × 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 005F × 0308 × 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u0308\u0061\u0027\u2060", new String[] { "\u005F\u0308\u0061" });
-    // ÷ 005F × 0061 ÷ 002C ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [13.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 005F × 0061 ÷ 002C ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [13.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u0061\u002C", new String[] { "\u005F\u0061" });
-    // ÷ 005F × 0308 × 0061 ÷ 002C ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 005F × 0308 × 0061 ÷ 002C ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u0308\u0061\u002C", new String[] { "\u005F\u0308\u0061" });
-    // ÷ 005F × 0031 ÷ 003A ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [13.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 005F × 0031 ÷ 003A ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [13.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u0031\u003A", new String[] { "\u005F\u0031" });
-    // ÷ 005F × 0308 × 0031 ÷ 003A ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 005F × 0308 × 0031 ÷ 003A ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u0308\u0031\u003A", new String[] { "\u005F\u0308\u0031" });
-    // ÷ 005F × 0031 ÷ 0027 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [13.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 005F × 0031 ÷ 0027 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [13.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u0031\u0027", new String[] { "\u005F\u0031" });
-    // ÷ 005F × 0308 × 0031 ÷ 0027 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 005F × 0308 × 0031 ÷ 0027 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u0308\u0031\u0027", new String[] { "\u005F\u0308\u0031" });
-    // ÷ 005F × 0031 ÷ 002C ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [13.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 005F × 0031 ÷ 002C ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [13.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u0031\u002C", new String[] { "\u005F\u0031" });
-    // ÷ 005F × 0308 × 0031 ÷ 002C ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 005F × 0308 × 0031 ÷ 002C ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u0308\u0031\u002C", new String[] { "\u005F\u0308\u0031" });
-    // ÷ 005F × 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [13.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 005F × 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [13.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u0031\u002E\u2060", new String[] { "\u005F\u0031" });
-    // ÷ 005F × 0308 × 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 005F × 0308 × 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] LOW LINE (ExtendNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u005F\u0308\u0031\u002E\u2060", new String[] { "\u005F\u0308\u0031" });
-    // ÷ 00AD ÷ 0001 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] (Other) ÷ [0.3]
+    // ÷ 00AD ÷ 0001 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] (Other) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u0001", new String[] { });
-    // ÷ 00AD × 0308 ÷ 0001 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] (Other) ÷ [0.3]
+    // ÷ 00AD × 0308 ÷ 0001 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] (Other) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u0308\u0001", new String[] { });
-    // ÷ 00AD ÷ 000D ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [3.2] (CR) ÷ [0.3]
+    // ÷ 00AD ÷ 000D ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [3.2] (CR) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\r", new String[] { });
-    // ÷ 00AD × 0308 ÷ 000D ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (CR) ÷ [0.3]
+    // ÷ 00AD × 0308 ÷ 000D ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (CR) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u0308\r", new String[] { });
-    // ÷ 00AD ÷ 000A ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [3.2] (LF) ÷ [0.3]
+    // ÷ 00AD ÷ 000A ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [3.2] (LF) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\n", new String[] { });
-    // ÷ 00AD × 0308 ÷ 000A ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (LF) ÷ [0.3]
+    // ÷ 00AD × 0308 ÷ 000A ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (LF) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u0308\n", new String[] { });
-    // ÷ 00AD ÷ 000B ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [3.2] (Newline) ÷ [0.3]
+    // ÷ 00AD ÷ 000B ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [3.2] (Newline) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u000B", new String[] { });
-    // ÷ 00AD × 0308 ÷ 000B ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (Newline) ÷ [0.3]
+    // ÷ 00AD × 0308 ÷ 000B ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (Newline) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u0308\u000B", new String[] { });
-    // ÷ 00AD ÷ 3031 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    // ÷ 00AD ÷ 3031 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u3031", new String[] { "\u3031" });
-    // ÷ 00AD × 0308 ÷ 3031 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    // ÷ 00AD × 0308 ÷ 3031 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u0308\u3031", new String[] { "\u3031" });
-    // ÷ 00AD ÷ 0041 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    // ÷ 00AD ÷ 0041 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u0041", new String[] { "\u0041" });
-    // ÷ 00AD × 0308 ÷ 0041 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    // ÷ 00AD × 0308 ÷ 0041 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u0308\u0041", new String[] { "\u0041" });
-    // ÷ 00AD ÷ 003A ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 00AD ÷ 003A ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u003A", new String[] { });
-    // ÷ 00AD × 0308 ÷ 003A ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 00AD × 0308 ÷ 003A ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u0308\u003A", new String[] { });
-    // ÷ 00AD ÷ 002C ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 00AD ÷ 002C ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u002C", new String[] { });
-    // ÷ 00AD × 0308 ÷ 002C ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 00AD × 0308 ÷ 002C ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u0308\u002C", new String[] { });
-    // ÷ 00AD ÷ 0027 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 00AD ÷ 0027 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u0027", new String[] { });
-    // ÷ 00AD × 0308 ÷ 0027 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 00AD × 0308 ÷ 0027 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u0308\u0027", new String[] { });
-    // ÷ 00AD ÷ 0030 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    // ÷ 00AD ÷ 0030 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u0030", new String[] { "\u0030" });
-    // ÷ 00AD × 0308 ÷ 0030 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    // ÷ 00AD × 0308 ÷ 0030 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u0308\u0030", new String[] { "\u0030" });
-    // ÷ 00AD ÷ 005F ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    // ÷ 00AD ÷ 005F ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u005F", new String[] { });
-    // ÷ 00AD × 0308 ÷ 005F ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    // ÷ 00AD × 0308 ÷ 005F ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u0308\u005F", new String[] { });
-    // ÷ 00AD × 00AD ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    // ÷ 00AD × 00AD ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u00AD", new String[] { });
-    // ÷ 00AD × 0308 × 00AD ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    // ÷ 00AD × 0308 × 00AD ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u0308\u00AD", new String[] { });
-    // ÷ 00AD × 0300 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    // ÷ 00AD × 0300 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u0300", new String[] { });
-    // ÷ 00AD × 0308 × 0300 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    // ÷ 00AD × 0308 × 0300 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u0308\u0300", new String[] { });
-    // ÷ 00AD ÷ 0061 × 2060 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 00AD ÷ 0061 × 2060 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u0061\u2060", new String[] { "\u0061\u2060" });
-    // ÷ 00AD × 0308 ÷ 0061 × 2060 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 00AD × 0308 ÷ 0061 × 2060 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u0308\u0061\u2060", new String[] { "\u0061\u2060" });
-    // ÷ 00AD ÷ 0061 ÷ 003A ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 00AD ÷ 0061 ÷ 003A ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u0061\u003A", new String[] { "\u0061" });
-    // ÷ 00AD × 0308 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 00AD × 0308 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u0308\u0061\u003A", new String[] { "\u0061" });
-    // ÷ 00AD ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 00AD ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u0061\u0027", new String[] { "\u0061" });
-    // ÷ 00AD × 0308 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 00AD × 0308 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u0308\u0061\u0027", new String[] { "\u0061" });
-    // ÷ 00AD ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 00AD ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u0061\u0027\u2060", new String[] { "\u0061" });
-    // ÷ 00AD × 0308 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 00AD × 0308 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u0308\u0061\u0027\u2060", new String[] { "\u0061" });
-    // ÷ 00AD ÷ 0061 ÷ 002C ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 00AD ÷ 0061 ÷ 002C ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u0061\u002C", new String[] { "\u0061" });
-    // ÷ 00AD × 0308 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 00AD × 0308 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u0308\u0061\u002C", new String[] { "\u0061" });
-    // ÷ 00AD ÷ 0031 ÷ 003A ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 00AD ÷ 0031 ÷ 003A ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u0031\u003A", new String[] { "\u0031" });
-    // ÷ 00AD × 0308 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 00AD × 0308 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u0308\u0031\u003A", new String[] { "\u0031" });
-    // ÷ 00AD ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 00AD ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u0031\u0027", new String[] { "\u0031" });
-    // ÷ 00AD × 0308 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 00AD × 0308 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u0308\u0031\u0027", new String[] { "\u0031" });
-    // ÷ 00AD ÷ 0031 ÷ 002C ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 00AD ÷ 0031 ÷ 002C ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u0031\u002C", new String[] { "\u0031" });
-    // ÷ 00AD × 0308 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 00AD × 0308 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u0308\u0031\u002C", new String[] { "\u0031" });
-    // ÷ 00AD ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 00AD ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u0031\u002E\u2060", new String[] { "\u0031" });
-    // ÷ 00AD × 0308 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 00AD × 0308 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] SOFT HYPHEN (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u00AD\u0308\u0031\u002E\u2060", new String[] { "\u0031" });
-    // ÷ 0300 ÷ 0001 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] (Other) ÷ [0.3]
+    // ÷ 0300 ÷ 0001 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] (Other) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0300\u0001", new String[] { });
-    // ÷ 0300 × 0308 ÷ 0001 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] (Other) ÷ [0.3]
+    // ÷ 0300 × 0308 ÷ 0001 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] (Other) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0300\u0308\u0001", new String[] { });
-    // ÷ 0300 ÷ 000D ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [3.2] (CR) ÷ [0.3]
+    // ÷ 0300 ÷ 000D ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [3.2] (CR) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0300\r", new String[] { });
-    // ÷ 0300 × 0308 ÷ 000D ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (CR) ÷ [0.3]
+    // ÷ 0300 × 0308 ÷ 000D ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (CR) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0300\u0308\r", new String[] { });
-    // ÷ 0300 ÷ 000A ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [3.2] (LF) ÷ [0.3]
+    // ÷ 0300 ÷ 000A ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [3.2] (LF) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0300\n", new String[] { });
-    // ÷ 0300 × 0308 ÷ 000A ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (LF) ÷ [0.3]
+    // ÷ 0300 × 0308 ÷ 000A ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (LF) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0300\u0308\n", new String[] { });
-    // ÷ 0300 ÷ 000B ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [3.2] (Newline) ÷ [0.3]
+    // ÷ 0300 ÷ 000B ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [3.2] (Newline) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0300\u000B", new String[] { });
-    // ÷ 0300 × 0308 ÷ 000B ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (Newline) ÷ [0.3]
+    // ÷ 0300 × 0308 ÷ 000B ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (Newline) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0300\u0308\u000B", new String[] { });
-    // ÷ 0300 ÷ 3031 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    // ÷ 0300 ÷ 3031 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0300\u3031", new String[] { "\u3031" });
-    // ÷ 0300 × 0308 ÷ 3031 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+    // ÷ 0300 × 0308 ÷ 3031 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0300\u0308\u3031", new String[] { "\u3031" });
-    // ÷ 0300 ÷ 0041 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    // ÷ 0300 ÷ 0041 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0300\u0041", new String[] { "\u0041" });
-    // ÷ 0300 × 0308 ÷ 0041 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+    // ÷ 0300 × 0308 ÷ 0041 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0300\u0308\u0041", new String[] { "\u0041" });
-    // ÷ 0300 ÷ 003A ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 0300 ÷ 003A ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0300\u003A", new String[] { });
-    // ÷ 0300 × 0308 ÷ 003A ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+    // ÷ 0300 × 0308 ÷ 003A ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0300\u0308\u003A", new String[] { });
-    // ÷ 0300 ÷ 002C ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 0300 ÷ 002C ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0300\u002C", new String[] { });
-    // ÷ 0300 × 0308 ÷ 002C ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+    // ÷ 0300 × 0308 ÷ 002C ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0300\u0308\u002C", new String[] { });
-    // ÷ 0300 ÷ 0027 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 0300 ÷ 0027 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0300\u0027", new String[] { });
-    // ÷ 0300 × 0308 ÷ 0027 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+    // ÷ 0300 × 0308 ÷ 0027 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0300\u0308\u0027", new String[] { });
-    // ÷ 0300 ÷ 0030 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    // ÷ 0300 ÷ 0030 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0300\u0030", new String[] { "\u0030" });
-    // ÷ 0300 × 0308 ÷ 0030 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+    // ÷ 0300 × 0308 ÷ 0030 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0300\u0308\u0030", new String[] { "\u0030" });
-    // ÷ 0300 ÷ 005F ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    // ÷ 0300 ÷ 005F ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0300\u005F", new String[] { });
-    // ÷ 0300 × 0308 ÷ 005F ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+    // ÷ 0300 × 0308 ÷ 005F ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0300\u0308\u005F", new String[] { });
-    // ÷ 0300 × 00AD ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    // ÷ 0300 × 00AD ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0300\u00AD", new String[] { });
-    // ÷ 0300 × 0308 × 00AD ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+    // ÷ 0300 × 0308 × 00AD ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0300\u0308\u00AD", new String[] { });
-    // ÷ 0300 × 0300 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    // ÷ 0300 × 0300 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0300\u0300", new String[] { });
-    // ÷ 0300 × 0308 × 0300 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+    // ÷ 0300 × 0308 × 0300 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0300\u0308\u0300", new String[] { });
-    // ÷ 0300 ÷ 0061 × 2060 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 0300 ÷ 0061 × 2060 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
     assertAnalyzesTo(analyzer, "\u0300\u0061\u2060", new String[] { "\u0061\u2060" });
-    // ÷ 0300 × 0308 ÷ 0061 × 2060 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+    // ÷ 0300 × 0308 ÷ 0061 × 2060 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN
SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0300\u0308\u0061\u2060", new String[] { "\u0061\u2060" }); - // ÷ 0300 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0300 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0300\u0061\u003A", new String[] { "\u0061" }); - // ÷ 0300 × 0308 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0300 × 0308 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0300\u0308\u0061\u003A", new String[] { "\u0061" }); - // ÷ 0300 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0300 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0300\u0061\u0027", new String[] { "\u0061" }); - // ÷ 0300 × 0308 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0300 × 0308 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0300\u0308\u0061\u0027", new String[] { "\u0061" }); - // ÷ 0300 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0300 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0300\u0061\u0027\u2060", new String[] { "\u0061" }); - // ÷ 0300 × 0308 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0300 × 0308 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0300\u0308\u0061\u0027\u2060", new String[] { "\u0061" }); - // ÷ 0300 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0300 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0300\u0061\u002C", new String[] { "\u0061" }); - // ÷ 0300 × 0308 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0300 × 0308 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] COMBINING GRAVE 
ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0300\u0308\u0061\u002C", new String[] { "\u0061" }); - // ÷ 0300 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0300 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0300\u0031\u003A", new String[] { "\u0031" }); - // ÷ 0300 × 0308 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0300 × 0308 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0300\u0308\u0031\u003A", new String[] { "\u0031" }); - // ÷ 0300 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0300 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0300\u0031\u0027", new String[] { "\u0031" }); - // ÷ 0300 × 0308 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0300 × 0308 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0300\u0308\u0031\u0027", new String[] { "\u0031" }); - // ÷ 0300 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0300 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0300\u0031\u002C", new String[] { "\u0031" }); - // ÷ 0300 × 0308 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0300 × 0308 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0300\u0308\u0031\u002C", new String[] { "\u0031" }); - // ÷ 0300 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0300 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0300\u0031\u002E\u2060", new String[] { "\u0031" }); - // ÷ 0300 × 0308 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0300 × 0308 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ 
[0.3] assertAnalyzesTo(analyzer, "\u0300\u0308\u0031\u002E\u2060", new String[] { "\u0031" }); - // ÷ 0061 × 2060 ÷ 0001 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] (Other) ÷ [0.3] + // ÷ 0061 × 2060 ÷ 0001 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] (Other) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u0001", new String[] { "\u0061\u2060" }); - // ÷ 0061 × 2060 × 0308 ÷ 0001 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] (Other) ÷ [0.3] + // ÷ 0061 × 2060 × 0308 ÷ 0001 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] (Other) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u0001", new String[] { "\u0061\u2060\u0308" }); - // ÷ 0061 × 2060 ÷ 000D ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [3.2] (CR) ÷ [0.3] + // ÷ 0061 × 2060 ÷ 000D ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [3.2] (CR) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\r", new String[] { "\u0061\u2060" }); - // ÷ 0061 × 2060 × 0308 ÷ 000D ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (CR) ÷ [0.3] + // ÷ 0061 × 2060 × 0308 ÷ 000D ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (CR) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\r", new String[] { "\u0061\u2060\u0308" }); - // ÷ 0061 × 2060 ÷ 000A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [3.2] (LF) ÷ [0.3] + // ÷ 0061 × 2060 ÷ 000A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [3.2] (LF) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\n", new String[] { "\u0061\u2060" }); - // ÷ 0061 × 2060 × 0308 ÷ 000A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (LF) ÷ [0.3] + // ÷ 0061 × 2060 × 0308 ÷ 000A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (LF) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\n", new String[] { "\u0061\u2060\u0308" }); - // ÷ 0061 × 2060 ÷ 000B ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [3.2] (Newline) ÷ [0.3] + // ÷ 0061 × 2060 ÷ 000B ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [3.2] (Newline) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u000B", new String[] { "\u0061\u2060" }); - // ÷ 0061 × 2060 × 0308 ÷ 000B ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (Newline) ÷ [0.3] + // ÷ 0061 × 2060 × 0308 ÷ 000B ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (Newline) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u000B", new String[] { "\u0061\u2060\u0308" }); - // ÷ 0061 × 2060 ÷ 3031 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3] + // ÷ 0061 × 2060 ÷ 3031 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u3031", new String[] { "\u0061\u2060", 
"\u3031" }); - // ÷ 0061 × 2060 × 0308 ÷ 3031 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3] + // ÷ 0061 × 2060 × 0308 ÷ 3031 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u3031", new String[] { "\u0061\u2060\u0308", "\u3031" }); - // ÷ 0061 × 2060 × 0041 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3] + // ÷ 0061 × 2060 × 0041 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u0041", new String[] { "\u0061\u2060\u0041" }); - // ÷ 0061 × 2060 × 0308 × 0041 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [5.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3] + // ÷ 0061 × 2060 × 0308 × 0041 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [5.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u0041", new String[] { "\u0061\u2060\u0308\u0041" }); - // ÷ 0061 × 2060 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0061 × 2060 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u003A", new String[] { "\u0061\u2060" }); - // ÷ 0061 × 2060 × 0308 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0061 × 2060 × 0308 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u003A", new String[] { "\u0061\u2060\u0308" }); - // ÷ 0061 × 2060 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0061 × 2060 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u002C", new String[] { "\u0061\u2060" }); - // ÷ 0061 × 2060 × 0308 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0061 × 2060 × 0308 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u002C", new String[] { "\u0061\u2060\u0308" }); - // ÷ 0061 × 2060 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0061 × 2060 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u0027", new String[] { "\u0061\u2060" }); - // ÷ 0061 × 2060 × 0308 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ 
[999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0061 × 2060 × 0308 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u0027", new String[] { "\u0061\u2060\u0308" }); - // ÷ 0061 × 2060 × 0030 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [9.0] DIGIT ZERO (Numeric) ÷ [0.3] + // ÷ 0061 × 2060 × 0030 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [9.0] DIGIT ZERO (Numeric) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u0030", new String[] { "\u0061\u2060\u0030" }); - // ÷ 0061 × 2060 × 0308 × 0030 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [9.0] DIGIT ZERO (Numeric) ÷ [0.3] + // ÷ 0061 × 2060 × 0308 × 0030 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [9.0] DIGIT ZERO (Numeric) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u0030", new String[] { "\u0061\u2060\u0308\u0030" }); - // ÷ 0061 × 2060 × 005F ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [13.1] LOW LINE (ExtendNumLet) ÷ [0.3] + // ÷ 0061 × 2060 × 005F ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [13.1] LOW LINE (ExtendNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u005F", new String[] { "\u0061\u2060\u005F" }); - // ÷ 0061 × 2060 × 0308 × 005F ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.1] LOW LINE (ExtendNumLet) ÷ [0.3] + // ÷ 0061 × 2060 × 0308 × 005F ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [13.1] LOW LINE (ExtendNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u005F", new String[] { "\u0061\u2060\u0308\u005F" }); - // ÷ 0061 × 2060 × 00AD ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3] + // ÷ 0061 × 2060 × 00AD ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u00AD", new String[] { "\u0061\u2060\u00AD" }); - // ÷ 0061 × 2060 × 0308 × 00AD ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3] + // ÷ 0061 × 2060 × 0308 × 00AD ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u00AD", new String[] { "\u0061\u2060\u0308\u00AD" }); - // ÷ 0061 × 2060 × 0300 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] + // ÷ 0061 × 2060 × 0300 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u0300", new String[] { "\u0061\u2060\u0300" }); - // ÷ 0061 × 2060 × 0308 × 0300 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] + // ÷ 0061 × 2060 × 0308 × 0300 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER 
(Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u0300", new String[] { "\u0061\u2060\u0308\u0300" }); - // ÷ 0061 × 2060 × 0061 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0061 × 2060 × 0061 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u0061\u2060", new String[] { "\u0061\u2060\u0061\u2060" }); - // ÷ 0061 × 2060 × 0308 × 0061 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [5.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0061 × 2060 × 0308 × 0061 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [5.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u0061\u2060", new String[] { "\u0061\u2060\u0308\u0061\u2060" }); - // ÷ 0061 × 2060 × 0061 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0061 × 2060 × 0061 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u0061\u003A", new String[] { "\u0061\u2060\u0061" }); - // ÷ 0061 × 2060 × 0308 × 0061 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0061 × 2060 × 0308 × 0061 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u0061\u003A", new String[] { "\u0061\u2060\u0308\u0061" }); - // ÷ 0061 × 2060 × 0061 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0061 × 2060 × 0061 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u0061\u0027", new String[] { "\u0061\u2060\u0061" }); - // ÷ 0061 × 2060 × 0308 × 0061 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0061 × 2060 × 0308 × 0061 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u0061\u0027", new String[] { "\u0061\u2060\u0308\u0061" }); - // ÷ 0061 × 2060 × 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) 
÷ [0.3] + // ÷ 0061 × 2060 × 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u0061\u0027\u2060", new String[] { "\u0061\u2060\u0061" }); - // ÷ 0061 × 2060 × 0308 × 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0061 × 2060 × 0308 × 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u0061\u0027\u2060", new String[] { "\u0061\u2060\u0308\u0061" }); - // ÷ 0061 × 2060 × 0061 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0061 × 2060 × 0061 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u0061\u002C", new String[] { "\u0061\u2060\u0061" }); - // ÷ 0061 × 2060 × 0308 × 0061 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0061 × 2060 × 0308 × 0061 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [5.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u0061\u002C", new String[] { "\u0061\u2060\u0308\u0061" }); - // ÷ 0061 × 2060 × 0031 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0061 × 2060 × 0031 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u0031\u003A", new String[] { "\u0061\u2060\u0031" }); - // ÷ 0061 × 2060 × 0308 × 0031 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0061 × 2060 × 0308 × 0031 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u0031\u003A", new String[] { "\u0061\u2060\u0308\u0031" }); - // ÷ 0061 × 2060 × 0031 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0061 × 2060 × 0031 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u0031\u0027", new String[] { "\u0061\u2060\u0031" }); - // ÷ 0061 × 2060 × 0308 × 0031 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER 
(Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0061 × 2060 × 0308 × 0031 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u0031\u0027", new String[] { "\u0061\u2060\u0308\u0031" }); - // ÷ 0061 × 2060 × 0031 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0061 × 2060 × 0031 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u0031\u002C", new String[] { "\u0061\u2060\u0031" }); - // ÷ 0061 × 2060 × 0308 × 0031 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0061 × 2060 × 0308 × 0031 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u0031\u002C", new String[] { "\u0061\u2060\u0308\u0031" }); - // ÷ 0061 × 2060 × 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0061 × 2060 × 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u0031\u002E\u2060", new String[] { "\u0061\u2060\u0031" }); - // ÷ 0061 × 2060 × 0308 × 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0061 × 2060 × 0308 × 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [9.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u2060\u0308\u0031\u002E\u2060", new String[] { "\u0061\u2060\u0308\u0031" }); - // ÷ 0061 ÷ 003A ÷ 0001 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [999.0] (Other) ÷ [0.3] + // ÷ 0061 ÷ 003A ÷ 0001 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [999.0] (Other) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u0001", new String[] { "\u0061" }); - // ÷ 0061 ÷ 003A × 0308 ÷ 0001 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] (Other) ÷ [0.3] + // ÷ 0061 ÷ 003A × 0308 ÷ 0001 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] (Other) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u0001", new String[] { "\u0061" }); - // ÷ 0061 ÷ 003A ÷ 000D ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [3.2] (CR) ÷ [0.3] + // ÷ 0061 ÷ 003A ÷ 000D ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ 
[3.2] (CR) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\r", new String[] { "\u0061" }); - // ÷ 0061 ÷ 003A × 0308 ÷ 000D ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (CR) ÷ [0.3] + // ÷ 0061 ÷ 003A × 0308 ÷ 000D ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (CR) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\r", new String[] { "\u0061" }); - // ÷ 0061 ÷ 003A ÷ 000A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [3.2] (LF) ÷ [0.3] + // ÷ 0061 ÷ 003A ÷ 000A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [3.2] (LF) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\n", new String[] { "\u0061" }); - // ÷ 0061 ÷ 003A × 0308 ÷ 000A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (LF) ÷ [0.3] + // ÷ 0061 ÷ 003A × 0308 ÷ 000A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (LF) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\n", new String[] { "\u0061" }); - // ÷ 0061 ÷ 003A ÷ 000B ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [3.2] (Newline) ÷ [0.3] + // ÷ 0061 ÷ 003A ÷ 000B ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [3.2] (Newline) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u000B", new String[] { "\u0061" }); - // ÷ 0061 ÷ 003A × 0308 ÷ 000B ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (Newline) ÷ [0.3] + // ÷ 0061 ÷ 003A × 0308 ÷ 000B ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (Newline) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u000B", new String[] { "\u0061" }); - // ÷ 0061 ÷ 003A ÷ 3031 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3] + // ÷ 0061 ÷ 003A ÷ 3031 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u3031", new String[] { "\u0061", "\u3031" }); - // ÷ 0061 ÷ 003A × 0308 ÷ 3031 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3] + // ÷ 0061 ÷ 003A × 0308 ÷ 3031 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u3031", new String[] { "\u0061", "\u3031" }); - // ÷ 0061 × 003A × 0041 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] COLON (MidLetter) × [7.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3] + // ÷ 0061 × 003A × 0041 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] COLON (MidLetter) × [7.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u0041", new String[] { "\u0061\u003A\u0041" }); - // ÷ 0061 × 003A × 0308 × 0041 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3] + // ÷ 0061 × 003A × 0308 × 0041 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS 
(Extend_FE) × [7.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u0041", new String[] { "\u0061\u003A\u0308\u0041" }); - // ÷ 0061 ÷ 003A ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0061 ÷ 003A ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u003A", new String[] { "\u0061" }); - // ÷ 0061 ÷ 003A × 0308 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0061 ÷ 003A × 0308 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u003A", new String[] { "\u0061" }); - // ÷ 0061 ÷ 003A ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0061 ÷ 003A ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u002C", new String[] { "\u0061" }); - // ÷ 0061 ÷ 003A × 0308 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0061 ÷ 003A × 0308 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u002C", new String[] { "\u0061" }); - // ÷ 0061 ÷ 003A ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0061 ÷ 003A ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u0027", new String[] { "\u0061" }); - // ÷ 0061 ÷ 003A × 0308 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0061 ÷ 003A × 0308 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u0027", new String[] { "\u0061" }); - // ÷ 0061 ÷ 003A ÷ 0030 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3] + // ÷ 0061 ÷ 003A ÷ 0030 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u0030", new String[] { "\u0061", "\u0030" }); - // ÷ 0061 ÷ 003A × 0308 ÷ 0030 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3] + // ÷ 0061 ÷ 003A × 0308 ÷ 0030 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u0030", new String[] { "\u0061", "\u0030" }); - // ÷ 0061 ÷ 003A ÷ 005F ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3] + // ÷ 0061 ÷ 003A ÷ 005F ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) 
÷ [999.0] COLON (MidLetter) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u005F", new String[] { "\u0061" }); - // ÷ 0061 ÷ 003A × 0308 ÷ 005F ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3] + // ÷ 0061 ÷ 003A × 0308 ÷ 005F ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u005F", new String[] { "\u0061" }); - // ÷ 0061 ÷ 003A × 00AD ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3] + // ÷ 0061 ÷ 003A × 00AD ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u00AD", new String[] { "\u0061" }); - // ÷ 0061 ÷ 003A × 0308 × 00AD ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3] + // ÷ 0061 ÷ 003A × 0308 × 00AD ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u00AD", new String[] { "\u0061" }); - // ÷ 0061 ÷ 003A × 0300 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] + // ÷ 0061 ÷ 003A × 0300 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u0300", new String[] { "\u0061" }); - // ÷ 0061 ÷ 003A × 0308 × 0300 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] + // ÷ 0061 ÷ 003A × 0308 × 0300 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u0300", new String[] { "\u0061" }); - // ÷ 0061 × 003A × 0061 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] COLON (MidLetter) × [7.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0061 × 003A × 0061 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] COLON (MidLetter) × [7.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u0061\u2060", new String[] { "\u0061\u003A\u0061\u2060" }); - // ÷ 0061 × 003A × 0308 × 0061 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0061 × 003A × 0308 × 0061 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u0061\u2060", new String[] { "\u0061\u003A\u0308\u0061\u2060" }); - // ÷ 0061 × 003A × 0061 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] COLON (MidLetter) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0061 × 003A × 0061 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] COLON 
(MidLetter) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u0061\u003A", new String[] { "\u0061\u003A\u0061" }); - // ÷ 0061 × 003A × 0308 × 0061 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0061 × 003A × 0308 × 0061 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u0061\u003A", new String[] { "\u0061\u003A\u0308\u0061" }); - // ÷ 0061 × 003A × 0061 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] COLON (MidLetter) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0061 × 003A × 0061 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] COLON (MidLetter) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u0061\u0027", new String[] { "\u0061\u003A\u0061" }); - // ÷ 0061 × 003A × 0308 × 0061 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0061 × 003A × 0308 × 0061 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u0061\u0027", new String[] { "\u0061\u003A\u0308\u0061" }); - // ÷ 0061 × 003A × 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] COLON (MidLetter) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0061 × 003A × 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] COLON (MidLetter) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u0061\u0027\u2060", new String[] { "\u0061\u003A\u0061" }); - // ÷ 0061 × 003A × 0308 × 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0061 × 003A × 0308 × 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u0061\u0027\u2060", new String[] { "\u0061\u003A\u0308\u0061" }); - // ÷ 0061 × 003A × 0061 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] COLON (MidLetter) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0061 × 003A × 0061 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] COLON (MidLetter) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u0061\u002C", new String[] { "\u0061\u003A\u0061" }); - // ÷ 0061 × 003A × 0308 × 0061 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] 
LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0061 × 003A × 0308 × 0061 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u0061\u002C", new String[] { "\u0061\u003A\u0308\u0061" }); - // ÷ 0061 ÷ 003A ÷ 0031 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0061 ÷ 003A ÷ 0031 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u0031\u003A", new String[] { "\u0061", "\u0031" }); - // ÷ 0061 ÷ 003A × 0308 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0061 ÷ 003A × 0308 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u0031\u003A", new String[] { "\u0061", "\u0031" }); - // ÷ 0061 ÷ 003A ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0061 ÷ 003A ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u0031\u0027", new String[] { "\u0061", "\u0031" }); - // ÷ 0061 ÷ 003A × 0308 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0061 ÷ 003A × 0308 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u0031\u0027", new String[] { "\u0061", "\u0031" }); - // ÷ 0061 ÷ 003A ÷ 0031 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0061 ÷ 003A ÷ 0031 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u0031\u002C", new String[] { "\u0061", "\u0031" }); - // ÷ 0061 ÷ 003A × 0308 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0061 ÷ 003A × 0308 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u0031\u002C", new String[] { "\u0061", "\u0031" }); - // ÷ 0061 ÷ 003A ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0061 ÷ 003A ÷ 0031 ÷ 002E × 2060 ÷ # 
÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u003A\u0031\u002E\u2060", new String[] { "\u0061", "\u0031" });
- // ÷ 0061 ÷ 003A × 0308 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+ // ÷ 0061 ÷ 003A × 0308 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u003A\u0308\u0031\u002E\u2060", new String[] { "\u0061", "\u0031" });
- // ÷ 0061 ÷ 0027 ÷ 0001 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] (Other) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 ÷ 0001 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] (Other) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u0001", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 × 0308 ÷ 0001 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] (Other) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 0308 ÷ 0001 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] (Other) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u0001", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 ÷ 000D ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [3.2] (CR) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 ÷ 000D ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [3.2] (CR) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\r", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 × 0308 ÷ 000D ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (CR) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 0308 ÷ 000D ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (CR) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\r", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 ÷ 000A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [3.2] (LF) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 ÷ 000A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [3.2] (LF) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\n", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 × 0308 ÷ 000A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (LF) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 0308 ÷ 000A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (LF) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\n", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 ÷ 000B ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [3.2] (Newline) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 ÷ 000B ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [3.2] (Newline) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u000B", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 × 0308 ÷ 000B ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (Newline) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 0308 ÷ 000B ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (Newline) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u000B", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 ÷ 3031 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 ÷ 3031 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u3031", new String[] { "\u0061", "\u3031" });
- // ÷ 0061 ÷ 0027 × 0308 ÷ 3031 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 0308 ÷ 3031 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u3031", new String[] { "\u0061", "\u3031" });
- // ÷ 0061 × 0027 × 0041 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [7.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+ // ÷ 0061 × 0027 × 0041 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [7.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u0041", new String[] { "\u0061\u0027\u0041" });
- // ÷ 0061 × 0027 × 0308 × 0041 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+ // ÷ 0061 × 0027 × 0308 × 0041 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u0041", new String[] { "\u0061\u0027\u0308\u0041" });
- // ÷ 0061 ÷ 0027 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u003A", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 × 0308 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 0308 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u003A", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u002C", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 × 0308 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 0308 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u002C", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u0027", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 × 0308 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 0308 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u0027", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 ÷ 0030 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 ÷ 0030 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u0030", new String[] { "\u0061", "\u0030" });
- // ÷ 0061 ÷ 0027 × 0308 ÷ 0030 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 0308 ÷ 0030 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u0030", new String[] { "\u0061", "\u0030" });
- // ÷ 0061 ÷ 0027 ÷ 005F ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 ÷ 005F ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u005F", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 × 0308 ÷ 005F ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 0308 ÷ 005F ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u005F", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 × 00AD ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 00AD ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u00AD", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 × 0308 × 00AD ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 0308 × 00AD ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u00AD", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 × 0300 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 0300 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u0300", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 × 0308 × 0300 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 0308 × 0300 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u0300", new String[] { "\u0061" });
- // ÷ 0061 × 0027 × 0061 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [7.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+ // ÷ 0061 × 0027 × 0061 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [7.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u0061\u2060", new String[] { "\u0061\u0027\u0061\u2060" });
- // ÷ 0061 × 0027 × 0308 × 0061 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+ // ÷ 0061 × 0027 × 0308 × 0061 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u0061\u2060", new String[] { "\u0061\u0027\u0308\u0061\u2060" });
- // ÷ 0061 × 0027 × 0061 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+ // ÷ 0061 × 0027 × 0061 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u0061\u003A", new String[] { "\u0061\u0027\u0061" });
- // ÷ 0061 × 0027 × 0308 × 0061 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+ // ÷ 0061 × 0027 × 0308 × 0061 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u0061\u003A", new String[] { "\u0061\u0027\u0308\u0061" });
- // ÷ 0061 × 0027 × 0061 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+ // ÷ 0061 × 0027 × 0061 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u0061\u0027", new String[] { "\u0061\u0027\u0061" });
- // ÷ 0061 × 0027 × 0308 × 0061 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+ // ÷ 0061 × 0027 × 0308 × 0061 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u0061\u0027", new String[] { "\u0061\u0027\u0308\u0061" });
- // ÷ 0061 × 0027 × 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+ // ÷ 0061 × 0027 × 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u0061\u0027\u2060", new String[] { "\u0061\u0027\u0061" });
- // ÷ 0061 × 0027 × 0308 × 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+ // ÷ 0061 × 0027 × 0308 × 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u0061\u0027\u2060", new String[] { "\u0061\u0027\u0308\u0061" });
- // ÷ 0061 × 0027 × 0061 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+ // ÷ 0061 × 0027 × 0061 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u0061\u002C", new String[] { "\u0061\u0027\u0061" });
- // ÷ 0061 × 0027 × 0308 × 0061 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+ // ÷ 0061 × 0027 × 0308 × 0061 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u0061\u002C", new String[] { "\u0061\u0027\u0308\u0061" });
- // ÷ 0061 ÷ 0027 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u0031\u003A", new String[] { "\u0061", "\u0031" });
- // ÷ 0061 ÷ 0027 × 0308 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 0308 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u0031\u003A", new String[] { "\u0061", "\u0031" });
- // ÷ 0061 ÷ 0027 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u0031\u0027", new String[] { "\u0061", "\u0031" });
- // ÷ 0061 ÷ 0027 × 0308 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 0308 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u0031\u0027", new String[] { "\u0061", "\u0031" });
- // ÷ 0061 ÷ 0027 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u0031\u002C", new String[] { "\u0061", "\u0031" });
- // ÷ 0061 ÷ 0027 × 0308 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 0308 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u0031\u002C", new String[] { "\u0061", "\u0031" });
- // ÷ 0061 ÷ 0027 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u0031\u002E\u2060", new String[] { "\u0061", "\u0031" });
- // ÷ 0061 ÷ 0027 × 0308 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 0308 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u0308\u0031\u002E\u2060", new String[] { "\u0061", "\u0031" });
- // ÷ 0061 ÷ 0027 × 2060 ÷ 0001 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] (Other) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 2060 ÷ 0001 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] (Other) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0001", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 0001 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] (Other) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 0001 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] (Other) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u0001", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 × 2060 ÷ 000D ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [3.2] (CR) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 2060 ÷ 000D ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [3.2] (CR) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\r", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 000D ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (CR) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 000D ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (CR) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\r", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 × 2060 ÷ 000A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [3.2] (LF) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 2060 ÷ 000A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [3.2] (LF) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\n", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 000A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (LF) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 000A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (LF) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\n", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 × 2060 ÷ 000B ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [3.2] (Newline) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 2060 ÷ 000B ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [3.2] (Newline) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u000B", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 000B ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (Newline) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 000B ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (Newline) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u000B", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 × 2060 ÷ 3031 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 2060 ÷ 3031 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u3031", new String[] { "\u0061", "\u3031" });
- // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 3031 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 3031 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u3031", new String[] { "\u0061", "\u3031" });
- // ÷ 0061 × 0027 × 2060 × 0041 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [7.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+ // ÷ 0061 × 0027 × 2060 × 0041 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [7.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0041", new String[] { "\u0061\u0027\u2060\u0041" });
- // ÷ 0061 × 0027 × 2060 × 0308 × 0041 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+ // ÷ 0061 × 0027 × 2060 × 0308 × 0041 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u0041", new String[] { "\u0061\u0027\u2060\u0308\u0041" });
- // ÷ 0061 ÷ 0027 × 2060 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 2060 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u003A", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u003A", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 × 2060 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 2060 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u002C", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u002C", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 × 2060 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 2060 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0027", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u0027", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 × 2060 ÷ 0030 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 2060 ÷ 0030 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0030", new String[] { "\u0061", "\u0030" });
- // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 0030 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 0030 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u0030", new String[] { "\u0061", "\u0030" });
- // ÷ 0061 ÷ 0027 × 2060 ÷ 005F ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 2060 ÷ 005F ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u005F", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 005F ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 005F ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u005F", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 × 2060 × 00AD ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 2060 × 00AD ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u00AD", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 × 2060 × 0308 × 00AD ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 2060 × 0308 × 00AD ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u00AD", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 × 2060 × 0300 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 2060 × 0300 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0300", new String[] { "\u0061" });
- // ÷ 0061 ÷ 0027 × 2060 × 0308 × 0300 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 2060 × 0308 × 0300 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u0300", new String[] { "\u0061" });
- // ÷ 0061 × 0027 × 2060 × 0061 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [7.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+ // ÷ 0061 × 0027 × 2060 × 0061 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [7.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0061\u2060", new String[] { "\u0061\u0027\u2060\u0061\u2060" });
- // ÷ 0061 × 0027 × 2060 × 0308 × 0061 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+ // ÷ 0061 × 0027 × 2060 × 0308 × 0061 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u0061\u2060", new String[] { "\u0061\u0027\u2060\u0308\u0061\u2060" });
- // ÷ 0061 × 0027 × 2060 × 0061 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+ // ÷ 0061 × 0027 × 2060 × 0061 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0061\u003A", new String[] { "\u0061\u0027\u2060\u0061" });
- // ÷ 0061 × 0027 × 2060 × 0308 × 0061 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+ // ÷ 0061 × 0027 × 2060 × 0308 × 0061 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u0061\u003A", new String[] { "\u0061\u0027\u2060\u0308\u0061" });
- // ÷ 0061 × 0027 × 2060 × 0061 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+ // ÷ 0061 × 0027 × 2060 × 0061 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0061\u0027", new String[] { "\u0061\u0027\u2060\u0061" });
- // ÷ 0061 × 0027 × 2060 × 0308 × 0061 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+ // ÷ 0061 × 0027 × 2060 × 0308 × 0061 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u0061\u0027", new String[] { "\u0061\u0027\u2060\u0308\u0061" });
- // ÷ 0061 × 0027 × 2060 × 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+ // ÷ 0061 × 0027 × 2060 × 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0061\u0027\u2060", new String[] { "\u0061\u0027\u2060\u0061" });
- // ÷ 0061 × 0027 × 2060 × 0308 × 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+ // ÷ 0061 × 0027 × 2060 × 0308 × 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u0061\u0027\u2060", new String[] { "\u0061\u0027\u2060\u0308\u0061" });
- // ÷ 0061 × 0027 × 2060 × 0061 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+ // ÷ 0061 × 0027 × 2060 × 0061 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0061\u002C", new String[] { "\u0061\u0027\u2060\u0061" });
- // ÷ 0061 × 0027 × 2060 × 0308 × 0061 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+ // ÷ 0061 × 0027 × 2060 × 0308 × 0061 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [7.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u0061\u002C", new String[] { "\u0061\u0027\u2060\u0308\u0061" });
- // ÷ 0061 ÷ 0027 × 2060 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 2060 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0031\u003A", new String[] { "\u0061", "\u0031" });
- // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u0031\u003A", new String[] { "\u0061", "\u0031" });
- // ÷ 0061 ÷ 0027 × 2060 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 2060 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0031\u0027", new String[] { "\u0061", "\u0031" });
- // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u0031\u0027", new String[] { "\u0061", "\u0031" });
- // ÷ 0061 ÷ 0027 × 2060 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 2060 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0031\u002C", new String[] { "\u0061", "\u0031" });
- // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u0031\u002C", new String[] { "\u0061", "\u0031" });
- // ÷ 0061 ÷ 0027 × 2060 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 2060 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0031\u002E\u2060", new String[] { "\u0061", "\u0031" });
- // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+ // ÷ 0061 ÷ 0027 × 2060 × 0308 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u0027\u2060\u0308\u0031\u002E\u2060", new String[] { "\u0061", "\u0031" });
- // ÷ 0061 ÷ 002C ÷ 0001 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] (Other) ÷ [0.3]
+ // ÷ 0061 ÷ 002C ÷ 0001 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] (Other) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u0001", new String[] { "\u0061" });
- // ÷ 0061 ÷ 002C × 0308 ÷ 0001 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] (Other) ÷ [0.3]
+ // ÷ 0061 ÷ 002C × 0308 ÷ 0001 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] (Other) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u0001", new String[] { "\u0061" });
- // ÷ 0061 ÷ 002C ÷ 000D ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [3.2] (CR) ÷ [0.3]
+ // ÷ 0061 ÷ 002C ÷ 000D ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [3.2] (CR) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\r", new String[] { "\u0061" });
- // ÷ 0061 ÷ 002C × 0308 ÷ 000D ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (CR) ÷ [0.3]
+ // ÷ 0061 ÷ 002C × 0308 ÷ 000D ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (CR) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\r", new String[] { "\u0061" });
- // ÷ 0061 ÷ 002C ÷ 000A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [3.2] (LF) ÷ [0.3]
+ // ÷ 0061 ÷ 002C ÷ 000A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [3.2] (LF) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\n", new String[] { "\u0061" });
- // ÷ 0061 ÷ 002C × 0308 ÷ 000A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (LF) ÷ [0.3]
+ // ÷ 0061 ÷ 002C × 0308 ÷ 000A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (LF) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\n", new String[] { "\u0061" });
- // ÷ 0061 ÷ 002C ÷ 000B ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [3.2] (Newline) ÷ [0.3]
+ // ÷ 0061 ÷ 002C ÷ 000B ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [3.2] (Newline) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u000B", new String[] { "\u0061" });
- // ÷ 0061 ÷ 002C × 0308 ÷ 000B ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (Newline) ÷ [0.3]
+ // ÷ 0061 ÷ 002C × 0308 ÷ 000B ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (Newline) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u000B", new String[] { "\u0061" });
- // ÷ 0061 ÷ 002C ÷ 3031 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+ // ÷ 0061 ÷ 002C ÷ 3031 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u3031", new String[] { "\u0061", "\u3031" });
- // ÷ 0061 ÷ 002C × 0308 ÷ 3031 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+ // ÷ 0061 ÷ 002C × 0308 ÷ 3031 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u3031", new String[] { "\u0061", "\u3031" });
- // ÷ 0061 ÷ 002C ÷ 0041 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+ // ÷ 0061 ÷ 002C ÷ 0041 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u0041", new String[] { "\u0061", "\u0041" });
- // ÷ 0061 ÷ 002C × 0308 ÷ 0041 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+ // ÷ 0061 ÷ 002C × 0308 ÷ 0041 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u0041", new String[] { "\u0061", "\u0041" });
- // ÷ 0061 ÷ 002C ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+ // ÷ 0061 ÷ 002C ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u003A", new String[] { "\u0061" });
- // ÷ 0061 ÷ 002C × 0308 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+ // ÷ 0061 ÷ 002C × 0308 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u003A", new String[] { "\u0061" });
- // ÷ 0061 ÷ 002C ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+ // ÷ 0061 ÷ 002C ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u002C", new String[] { "\u0061" });
- // ÷ 0061 ÷ 002C × 0308 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+ // ÷ 0061 ÷ 002C × 0308 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u002C", new String[] { "\u0061" });
- // ÷ 0061 ÷ 002C ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+ // ÷ 0061 ÷ 002C ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u0027", new String[] { "\u0061" });
- // ÷ 0061 ÷ 002C × 0308 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+ // ÷ 0061 ÷ 002C × 0308 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u0027", new String[] { "\u0061" });
- // ÷ 0061 ÷ 002C ÷ 0030 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+ // ÷ 0061 ÷ 002C ÷ 0030 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u0030", new String[] { "\u0061", "\u0030" });
- // ÷ 0061 ÷ 002C × 0308 ÷ 0030 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+ // ÷ 0061 ÷ 002C × 0308 ÷ 0030 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u0030", new String[] { "\u0061", "\u0030" });
- // ÷ 0061 ÷ 002C ÷ 005F ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+ // ÷ 0061 ÷ 002C ÷ 005F ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u005F", new String[] { "\u0061" });
- // ÷ 0061 ÷ 002C × 0308 ÷ 005F ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
+ // ÷ 0061 ÷ 002C × 0308 ÷ 005F ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u005F", new String[] { "\u0061" });
- // ÷ 0061 ÷ 002C × 00AD ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+ // ÷ 0061 ÷ 002C × 00AD ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u00AD", new String[] { "\u0061" });
- // ÷ 0061 ÷ 002C × 0308 × 00AD ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
+ // ÷ 0061 ÷ 002C × 0308 × 00AD ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u00AD", new String[] { "\u0061" });
- // ÷ 0061 ÷ 002C × 0300 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+ // ÷ 0061 ÷ 002C × 0300 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u0300", new String[] { "\u0061" });
- // ÷ 0061 ÷ 002C × 0308 × 0300 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
+ // ÷ 0061 ÷ 002C × 0308 × 0300 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u0300", new String[] { "\u0061" });
- // ÷ 0061 ÷ 002C ÷ 0061 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+ // ÷ 0061 ÷ 002C ÷ 0061 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u0061\u2060", new String[] { "\u0061", "\u0061\u2060" });
- // ÷ 0061 ÷ 002C × 0308 ÷ 0061 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+ // ÷ 0061 ÷ 002C × 0308 ÷ 0061 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u0061\u2060", new String[] { "\u0061", "\u0061\u2060" });
- // ÷ 0061 ÷ 002C ÷ 0061 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+ // ÷ 0061 ÷ 002C ÷ 0061 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u0061\u003A", new String[] { "\u0061", "\u0061" });
- // ÷ 0061 ÷ 002C × 0308 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+ // ÷ 0061 ÷ 002C × 0308 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u0061\u003A", new String[] { "\u0061", "\u0061" });
- // ÷ 0061 ÷ 002C ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+ // ÷ 0061 ÷ 002C ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u0061\u0027", new String[] { "\u0061", "\u0061" });
- // ÷ 0061 ÷ 002C × 0308 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+ // ÷ 0061 ÷ 002C × 0308 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u0061\u0027", new String[] { "\u0061", "\u0061" });
- // ÷ 0061 ÷ 002C ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+ // ÷ 0061 ÷ 002C ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u0061\u0027\u2060", new String[] { "\u0061", "\u0061" });
- // ÷ 0061 ÷ 002C × 0308 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+ // ÷ 0061 ÷ 002C × 0308 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u0061\u0027\u2060", new String[] { "\u0061", "\u0061" });
- // ÷ 0061 ÷ 002C ÷ 0061 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+ // ÷ 0061 ÷ 002C ÷ 0061 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u0061\u002C", new String[] { "\u0061", "\u0061" });
- // ÷ 0061 ÷ 002C × 0308 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+ // ÷ 0061 ÷ 002C × 0308 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u0061\u002C", new String[] { "\u0061", "\u0061" });
- // ÷ 0061 ÷ 002C ÷ 0031 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+ // ÷ 0061 ÷ 002C ÷ 0031 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u0031\u003A", new String[] { "\u0061", "\u0031" });
- // ÷ 0061 ÷ 002C × 0308 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+ // ÷ 0061 ÷ 002C × 0308 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u0031\u003A", new String[] { "\u0061", "\u0031" });
- // ÷ 0061 ÷ 002C ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+ // ÷ 0061 ÷ 002C ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u0031\u0027", new String[] { "\u0061", "\u0031" });
- // ÷ 0061 ÷ 002C × 0308 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+ // ÷ 0061 ÷ 002C × 0308 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u0031\u0027", new String[] { "\u0061", "\u0031" });
- // ÷ 0061 ÷ 002C ÷ 0031 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+ // ÷ 0061 ÷ 002C ÷ 0031 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u0031\u002C", new String[] { "\u0061", "\u0031" });
- // ÷ 0061 ÷ 002C × 0308 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+ // ÷ 0061 ÷ 002C × 0308 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u0031\u002C", new String[] { "\u0061", "\u0031" });
- // ÷ 0061 ÷ 002C ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+ // ÷ 0061 ÷ 002C ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u0031\u002E\u2060", new String[] { "\u0061", "\u0031" });
- // ÷ 0061 ÷ 002C × 0308 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
+ // ÷ 0061 ÷ 002C × 0308 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0061\u002C\u0308\u0031\u002E\u2060", new String[] { "\u0061", "\u0031" });
- // ÷ 0031 ÷ 003A ÷ 0001 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] (Other) ÷ [0.3]
+ // ÷ 0031 ÷ 003A ÷ 0001 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] (Other) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0031\u003A\u0001", new String[] { "\u0031" });
- // ÷ 0031 ÷ 003A × 0308 ÷ 0001 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] (Other) ÷ [0.3]
+ // ÷ 0031 ÷ 003A × 0308 ÷ 0001 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] (Other) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u0001", new String[] { "\u0031" });
- // ÷ 0031 ÷ 003A ÷ 000D ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [3.2] (CR) ÷ [0.3]
+ // ÷ 0031 ÷ 003A ÷ 000D ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [3.2] (CR) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0031\u003A\r", new String[] { "\u0031" });
- // ÷ 0031 ÷ 003A × 0308 ÷ 000D ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (CR) ÷ [0.3]
+ // ÷ 0031 ÷ 003A × 0308 ÷ 000D ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (CR) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\r", new String[] { "\u0031" });
- // ÷ 0031 ÷ 003A ÷ 000A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [3.2] (LF) ÷ [0.3]
+ // ÷ 0031 ÷ 003A ÷ 000A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [3.2] (LF) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0031\u003A\n", new String[] { "\u0031" });
- // ÷ 0031 ÷ 003A × 0308 ÷ 000A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (LF) ÷ [0.3]
+ // ÷ 0031 ÷ 003A × 0308 ÷ 000A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (LF) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\n", new String[] { "\u0031" });
- // ÷ 0031 ÷ 003A ÷ 000B ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [3.2] (Newline) ÷ [0.3]
+ // ÷ 0031 ÷ 003A ÷ 000B ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [3.2] (Newline) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0031\u003A\u000B", new String[] { "\u0031" });
- // ÷ 0031 ÷ 003A × 0308 ÷ 000B ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (Newline) ÷ [0.3]
+ // ÷ 0031 ÷ 003A × 0308 ÷ 000B ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (Newline) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u000B", new String[] { "\u0031" });
- // ÷ 0031 ÷ 003A ÷ 3031 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+ // ÷ 0031 ÷ 003A ÷ 3031 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0031\u003A\u3031", new String[] { "\u0031", "\u3031" });
- // ÷ 0031 ÷ 003A × 0308 ÷ 3031 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
+ // ÷ 0031 ÷ 003A × 0308 ÷ 3031 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u3031", new String[] { "\u0031", "\u3031" });
- // ÷ 0031 ÷ 003A ÷ 0041 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+ // ÷ 0031 ÷ 003A ÷ 0041 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0031\u003A\u0041", new String[] { "\u0031", "\u0041" });
- // ÷ 0031 ÷ 003A × 0308 ÷ 0041 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
+ // ÷ 0031 ÷ 003A × 0308 ÷ 0041 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u0041", new String[] { "\u0031", "\u0041" });
- // ÷ 0031 ÷ 003A ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+ // ÷ 0031 ÷ 003A ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0031\u003A\u003A", new String[] { "\u0031" });
- // ÷ 0031 ÷ 003A × 0308 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
+ // ÷ 0031 ÷ 003A × 0308 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u003A", new String[] { "\u0031" });
- // ÷ 0031 ÷ 003A ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+ // ÷ 0031 ÷ 003A ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0031\u003A\u002C", new String[] { "\u0031" });
- // ÷ 0031 ÷ 003A × 0308 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
+ // ÷ 0031 ÷ 003A × 0308 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u002C", new String[] { "\u0031" });
- // ÷ 0031 ÷ 003A ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+ // ÷ 0031 ÷ 003A ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0031\u003A\u0027", new String[] { "\u0031" });
- // ÷ 0031 ÷ 003A × 0308 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
+ // ÷ 0031 ÷ 003A × 0308 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3]
assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u0027", new String[] { "\u0031" });
- // ÷ 0031 ÷ 003A ÷ 0030 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3]
+ // ÷ 0031 ÷ 003A ÷ 0030 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] DIGIT ZERO (Numeric) ÷
[0.3] assertAnalyzesTo(analyzer, "\u0031\u003A\u0030", new String[] { "\u0031", "\u0030" }); - // ÷ 0031 ÷ 003A × 0308 ÷ 0030 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3] + // ÷ 0031 ÷ 003A × 0308 ÷ 0030 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ZERO (Numeric) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u0030", new String[] { "\u0031", "\u0030" }); - // ÷ 0031 ÷ 003A ÷ 005F ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3] + // ÷ 0031 ÷ 003A ÷ 005F ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u003A\u005F", new String[] { "\u0031" }); - // ÷ 0031 ÷ 003A × 0308 ÷ 005F ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3] + // ÷ 0031 ÷ 003A × 0308 ÷ 005F ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u005F", new String[] { "\u0031" }); - // ÷ 0031 ÷ 003A × 00AD ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3] + // ÷ 0031 ÷ 003A × 00AD ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u003A\u00AD", new String[] { "\u0031" }); - // ÷ 0031 ÷ 003A × 0308 × 00AD ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3] + // ÷ 0031 ÷ 003A × 0308 × 00AD ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u00AD", new String[] { "\u0031" }); - // ÷ 0031 ÷ 003A × 0300 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] + // ÷ 0031 ÷ 003A × 0300 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u003A\u0300", new String[] { "\u0031" }); - // ÷ 0031 ÷ 003A × 0308 × 0300 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] + // ÷ 0031 ÷ 003A × 0308 × 0300 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u0300", new String[] { "\u0031" }); - // ÷ 0031 ÷ 003A ÷ 0061 × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0031 ÷ 003A ÷ 0061 × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u003A\u0061\u2060", new String[] { "\u0031", "\u0061\u2060" }); - // ÷ 0031 ÷ 003A × 0308 ÷ 0061 × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0031 ÷ 003A × 0308 ÷ 
0061 × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u0061\u2060", new String[] { "\u0031", "\u0061\u2060" }); - // ÷ 0031 ÷ 003A ÷ 0061 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0031 ÷ 003A ÷ 0061 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u003A\u0061\u003A", new String[] { "\u0031", "\u0061" }); - // ÷ 0031 ÷ 003A × 0308 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0031 ÷ 003A × 0308 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u0061\u003A", new String[] { "\u0031", "\u0061" }); - // ÷ 0031 ÷ 003A ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0031 ÷ 003A ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u003A\u0061\u0027", new String[] { "\u0031", "\u0061" }); - // ÷ 0031 ÷ 003A × 0308 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0031 ÷ 003A × 0308 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u0061\u0027", new String[] { "\u0031", "\u0061" }); - // ÷ 0031 ÷ 003A ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0031 ÷ 003A ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u003A\u0061\u0027\u2060", new String[] { "\u0031", "\u0061" }); - // ÷ 0031 ÷ 003A × 0308 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0031 ÷ 003A × 0308 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u0061\u0027\u2060", new String[] { "\u0031", "\u0061" }); - // ÷ 0031 ÷ 003A ÷ 0061 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA 
(MidNum) ÷ [0.3] + // ÷ 0031 ÷ 003A ÷ 0061 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u003A\u0061\u002C", new String[] { "\u0031", "\u0061" }); - // ÷ 0031 ÷ 003A × 0308 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0031 ÷ 003A × 0308 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u0061\u002C", new String[] { "\u0031", "\u0061" }); - // ÷ 0031 ÷ 003A ÷ 0031 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0031 ÷ 003A ÷ 0031 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u003A\u0031\u003A", new String[] { "\u0031", "\u0031" }); - // ÷ 0031 ÷ 003A × 0308 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0031 ÷ 003A × 0308 ÷ 0031 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u0031\u003A", new String[] { "\u0031", "\u0031" }); - // ÷ 0031 ÷ 003A ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0031 ÷ 003A ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u003A\u0031\u0027", new String[] { "\u0031", "\u0031" }); - // ÷ 0031 ÷ 003A × 0308 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0031 ÷ 003A × 0308 ÷ 0031 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u0031\u0027", new String[] { "\u0031", "\u0031" }); - // ÷ 0031 ÷ 003A ÷ 0031 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0031 ÷ 003A ÷ 0031 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u003A\u0031\u002C", new String[] { "\u0031", "\u0031" }); - // ÷ 0031 ÷ 003A × 0308 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0031 ÷ 003A × 0308 ÷ 0031 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u0031\u002C", new String[] { 
"\u0031", "\u0031" }); - // ÷ 0031 ÷ 003A ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0031 ÷ 003A ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u003A\u0031\u002E\u2060", new String[] { "\u0031", "\u0031" }); - // ÷ 0031 ÷ 003A × 0308 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0031 ÷ 003A × 0308 ÷ 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u003A\u0308\u0031\u002E\u2060", new String[] { "\u0031", "\u0031" }); - // ÷ 0031 ÷ 0027 ÷ 0001 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] (Other) ÷ [0.3] + // ÷ 0031 ÷ 0027 ÷ 0001 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] (Other) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u0001", new String[] { "\u0031" }); - // ÷ 0031 ÷ 0027 × 0308 ÷ 0001 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] (Other) ÷ [0.3] + // ÷ 0031 ÷ 0027 × 0308 ÷ 0001 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] (Other) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u0001", new String[] { "\u0031" }); - // ÷ 0031 ÷ 0027 ÷ 000D ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [3.2] (CR) ÷ [0.3] + // ÷ 0031 ÷ 0027 ÷ 000D ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [3.2] (CR) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\r", new String[] { "\u0031" }); - // ÷ 0031 ÷ 0027 × 0308 ÷ 000D ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (CR) ÷ [0.3] + // ÷ 0031 ÷ 0027 × 0308 ÷ 000D ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (CR) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\r", new String[] { "\u0031" }); - // ÷ 0031 ÷ 0027 ÷ 000A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [3.2] (LF) ÷ [0.3] + // ÷ 0031 ÷ 0027 ÷ 000A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [3.2] (LF) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\n", new String[] { "\u0031" }); - // ÷ 0031 ÷ 0027 × 0308 ÷ 000A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (LF) ÷ [0.3] + // ÷ 0031 ÷ 0027 × 0308 ÷ 000A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (LF) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\n", new String[] { "\u0031" }); - // ÷ 0031 ÷ 0027 ÷ 000B ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [3.2] (Newline) ÷ [0.3] + // ÷ 0031 ÷ 0027 ÷ 000B ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [3.2] (Newline) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u000B", new String[] { "\u0031" }); - // 
÷ 0031 ÷ 0027 × 0308 ÷ 000B ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (Newline) ÷ [0.3] + // ÷ 0031 ÷ 0027 × 0308 ÷ 000B ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (Newline) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u000B", new String[] { "\u0031" }); - // ÷ 0031 ÷ 0027 ÷ 3031 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3] + // ÷ 0031 ÷ 0027 ÷ 3031 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u3031", new String[] { "\u0031", "\u3031" }); - // ÷ 0031 ÷ 0027 × 0308 ÷ 3031 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3] + // ÷ 0031 ÷ 0027 × 0308 ÷ 3031 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u3031", new String[] { "\u0031", "\u3031" }); - // ÷ 0031 ÷ 0027 ÷ 0041 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3] + // ÷ 0031 ÷ 0027 ÷ 0041 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u0041", new String[] { "\u0031", "\u0041" }); - // ÷ 0031 ÷ 0027 × 0308 ÷ 0041 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3] + // ÷ 0031 ÷ 0027 × 0308 ÷ 0041 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u0041", new String[] { "\u0031", "\u0041" }); - // ÷ 0031 ÷ 0027 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0031 ÷ 0027 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u003A", new String[] { "\u0031" }); - // ÷ 0031 ÷ 0027 × 0308 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0031 ÷ 0027 × 0308 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u003A", new String[] { "\u0031" }); - // ÷ 0031 ÷ 0027 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0031 ÷ 0027 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u002C", new String[] { "\u0031" }); - // ÷ 0031 ÷ 0027 × 0308 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0031 ÷ 0027 × 0308 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3] 
assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u002C", new String[] { "\u0031" }); - // ÷ 0031 ÷ 0027 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0031 ÷ 0027 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u0027", new String[] { "\u0031" }); - // ÷ 0031 ÷ 0027 × 0308 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0031 ÷ 0027 × 0308 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u0027", new String[] { "\u0031" }); - // ÷ 0031 × 0027 × 0030 ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] APOSTROPHE (MidNumLet) × [11.0] DIGIT ZERO (Numeric) ÷ [0.3] + // ÷ 0031 × 0027 × 0030 ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] APOSTROPHE (MidNumLet) × [11.0] DIGIT ZERO (Numeric) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u0030", new String[] { "\u0031\u0027\u0030" }); - // ÷ 0031 × 0027 × 0308 × 0030 ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ZERO (Numeric) ÷ [0.3] + // ÷ 0031 × 0027 × 0308 × 0030 ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ZERO (Numeric) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u0030", new String[] { "\u0031\u0027\u0308\u0030" }); - // ÷ 0031 ÷ 0027 ÷ 005F ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3] + // ÷ 0031 ÷ 0027 ÷ 005F ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u005F", new String[] { "\u0031" }); - // ÷ 0031 ÷ 0027 × 0308 ÷ 005F ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3] + // ÷ 0031 ÷ 0027 × 0308 ÷ 005F ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u005F", new String[] { "\u0031" }); - // ÷ 0031 ÷ 0027 × 00AD ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3] + // ÷ 0031 ÷ 0027 × 00AD ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u00AD", new String[] { "\u0031" }); - // ÷ 0031 ÷ 0027 × 0308 × 00AD ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3] + // ÷ 0031 ÷ 0027 × 0308 × 00AD ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u00AD", new String[] { "\u0031" }); - // ÷ 0031 ÷ 0027 × 0300 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] + // ÷ 0031 ÷ 0027 × 0300 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] assertAnalyzesTo(analyzer, 
"\u0031\u0027\u0300", new String[] { "\u0031" }); - // ÷ 0031 ÷ 0027 × 0308 × 0300 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] + // ÷ 0031 ÷ 0027 × 0308 × 0300 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u0300", new String[] { "\u0031" }); - // ÷ 0031 ÷ 0027 ÷ 0061 × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0031 ÷ 0027 ÷ 0061 × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u0061\u2060", new String[] { "\u0031", "\u0061\u2060" }); - // ÷ 0031 ÷ 0027 × 0308 ÷ 0061 × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0031 ÷ 0027 × 0308 ÷ 0061 × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u0061\u2060", new String[] { "\u0031", "\u0061\u2060" }); - // ÷ 0031 ÷ 0027 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0031 ÷ 0027 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u0061\u003A", new String[] { "\u0031", "\u0061" }); - // ÷ 0031 ÷ 0027 × 0308 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0031 ÷ 0027 × 0308 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u0061\u003A", new String[] { "\u0031", "\u0061" }); - // ÷ 0031 ÷ 0027 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0031 ÷ 0027 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u0061\u0027", new String[] { "\u0031", "\u0061" }); - // ÷ 0031 ÷ 0027 × 0308 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0031 ÷ 0027 × 0308 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u0061\u0027", new String[] { "\u0031", "\u0061" }); - // ÷ 0031 ÷ 0027 ÷ 0061 ÷ 0027 × 
2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0031 ÷ 0027 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u0061\u0027\u2060", new String[] { "\u0031", "\u0061" }); - // ÷ 0031 ÷ 0027 × 0308 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0031 ÷ 0027 × 0308 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u0061\u0027\u2060", new String[] { "\u0031", "\u0061" }); - // ÷ 0031 ÷ 0027 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0031 ÷ 0027 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u0061\u002C", new String[] { "\u0031", "\u0061" }); - // ÷ 0031 ÷ 0027 × 0308 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0031 ÷ 0027 × 0308 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u0061\u002C", new String[] { "\u0031", "\u0061" }); - // ÷ 0031 × 0027 × 0031 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] APOSTROPHE (MidNumLet) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0031 × 0027 × 0031 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] APOSTROPHE (MidNumLet) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u0031\u003A", new String[] { "\u0031\u0027\u0031" }); - // ÷ 0031 × 0027 × 0308 × 0031 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0031 × 0027 × 0308 × 0031 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u0031\u003A", new String[] { "\u0031\u0027\u0308\u0031" }); - // ÷ 0031 × 0027 × 0031 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] APOSTROPHE (MidNumLet) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0031 × 0027 × 0031 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] APOSTROPHE (MidNumLet) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u0031\u0027", new String[] { "\u0031\u0027\u0031" }); - // ÷ 0031 × 0027 × 0308 × 0031 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE 
(Numeric) × [12.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0031 × 0027 × 0308 × 0031 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u0031\u0027", new String[] { "\u0031\u0027\u0308\u0031" }); - // ÷ 0031 × 0027 × 0031 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] APOSTROPHE (MidNumLet) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0031 × 0027 × 0031 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] APOSTROPHE (MidNumLet) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u0031\u002C", new String[] { "\u0031\u0027\u0031" }); - // ÷ 0031 × 0027 × 0308 × 0031 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0031 × 0027 × 0308 × 0031 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u0031\u002C", new String[] { "\u0031\u0027\u0308\u0031" }); - // ÷ 0031 × 0027 × 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] APOSTROPHE (MidNumLet) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0031 × 0027 × 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] APOSTROPHE (MidNumLet) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u0031\u002E\u2060", new String[] { "\u0031\u0027\u0031" }); - // ÷ 0031 × 0027 × 0308 × 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0031 × 0027 × 0308 × 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] APOSTROPHE (MidNumLet) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u0027\u0308\u0031\u002E\u2060", new String[] { "\u0031\u0027\u0308\u0031" }); - // ÷ 0031 ÷ 002C ÷ 0001 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [999.0] (Other) ÷ [0.3] + // ÷ 0031 ÷ 002C ÷ 0001 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [999.0] (Other) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u0001", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002C × 0308 ÷ 0001 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] (Other) ÷ [0.3] + // ÷ 0031 ÷ 002C × 0308 ÷ 0001 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] (Other) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u0001", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002C ÷ 000D ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [3.2] (CR) ÷ [0.3] + // ÷ 0031 ÷ 002C ÷ 000D ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [3.2] (CR) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\r", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002C × 0308 ÷ 000D ÷ # ÷ [0.2] DIGIT 
ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (CR) ÷ [0.3] + // ÷ 0031 ÷ 002C × 0308 ÷ 000D ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (CR) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\r", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002C ÷ 000A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [3.2] (LF) ÷ [0.3] + // ÷ 0031 ÷ 002C ÷ 000A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [3.2] (LF) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\n", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002C × 0308 ÷ 000A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (LF) ÷ [0.3] + // ÷ 0031 ÷ 002C × 0308 ÷ 000A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (LF) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\n", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002C ÷ 000B ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [3.2] (Newline) ÷ [0.3] + // ÷ 0031 ÷ 002C ÷ 000B ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [3.2] (Newline) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u000B", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002C × 0308 ÷ 000B ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (Newline) ÷ [0.3] + // ÷ 0031 ÷ 002C × 0308 ÷ 000B ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (Newline) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u000B", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002C ÷ 3031 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3] + // ÷ 0031 ÷ 002C ÷ 3031 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u3031", new String[] { "\u0031", "\u3031" }); - // ÷ 0031 ÷ 002C × 0308 ÷ 3031 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3] + // ÷ 0031 ÷ 002C × 0308 ÷ 3031 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u3031", new String[] { "\u0031", "\u3031" }); - // ÷ 0031 ÷ 002C ÷ 0041 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3] + // ÷ 0031 ÷ 002C ÷ 0041 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u0041", new String[] { "\u0031", "\u0041" }); - // ÷ 0031 ÷ 002C × 0308 ÷ 0041 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3] + // ÷ 0031 ÷ 002C × 0308 ÷ 0041 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u0041", new String[] { "\u0031", "\u0041" }); - // ÷ 0031 ÷ 002C ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0031 ÷ 002C ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [999.0] COLON (MidLetter) ÷ 
[0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u003A", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002C × 0308 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0031 ÷ 002C × 0308 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u003A", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002C ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0031 ÷ 002C ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u002C", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002C × 0308 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0031 ÷ 002C × 0308 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u002C", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002C ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0031 ÷ 002C ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u0027", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002C × 0308 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0031 ÷ 002C × 0308 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u0027", new String[] { "\u0031" }); - // ÷ 0031 × 002C × 0030 ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] COMMA (MidNum) × [11.0] DIGIT ZERO (Numeric) ÷ [0.3] + // ÷ 0031 × 002C × 0030 ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] COMMA (MidNum) × [11.0] DIGIT ZERO (Numeric) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u0030", new String[] { "\u0031\u002C\u0030" }); - // ÷ 0031 × 002C × 0308 × 0030 ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ZERO (Numeric) ÷ [0.3] + // ÷ 0031 × 002C × 0308 × 0030 ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ZERO (Numeric) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u0030", new String[] { "\u0031\u002C\u0308\u0030" }); - // ÷ 0031 ÷ 002C ÷ 005F ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3] + // ÷ 0031 ÷ 002C ÷ 005F ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u005F", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002C × 0308 ÷ 005F ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3] + // ÷ 0031 ÷ 002C × 0308 ÷ 005F ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u005F", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002C × 00AD ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] 
COMMA (MidNum) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3] + // ÷ 0031 ÷ 002C × 00AD ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u00AD", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002C × 0308 × 00AD ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3] + // ÷ 0031 ÷ 002C × 0308 × 00AD ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u00AD", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002C × 0300 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] + // ÷ 0031 ÷ 002C × 0300 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u0300", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002C × 0308 × 0300 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] + // ÷ 0031 ÷ 002C × 0308 × 0300 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u0300", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002C ÷ 0061 × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0031 ÷ 002C ÷ 0061 × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u0061\u2060", new String[] { "\u0031", "\u0061\u2060" }); - // ÷ 0031 ÷ 002C × 0308 ÷ 0061 × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0031 ÷ 002C × 0308 ÷ 0061 × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u0061\u2060", new String[] { "\u0031", "\u0061\u2060" }); - // ÷ 0031 ÷ 002C ÷ 0061 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0031 ÷ 002C ÷ 0061 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u0061\u003A", new String[] { "\u0031", "\u0061" }); - // ÷ 0031 ÷ 002C × 0308 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0031 ÷ 002C × 0308 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u0061\u003A", new String[] { "\u0031", "\u0061" }); - // ÷ 0031 ÷ 002C ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] 
APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0031 ÷ 002C ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u0061\u0027", new String[] { "\u0031", "\u0061" }); - // ÷ 0031 ÷ 002C × 0308 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0031 ÷ 002C × 0308 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u0061\u0027", new String[] { "\u0031", "\u0061" }); - // ÷ 0031 ÷ 002C ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0031 ÷ 002C ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u0061\u0027\u2060", new String[] { "\u0031", "\u0061" }); - // ÷ 0031 ÷ 002C × 0308 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0031 ÷ 002C × 0308 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u0061\u0027\u2060", new String[] { "\u0031", "\u0061" }); - // ÷ 0031 ÷ 002C ÷ 0061 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0031 ÷ 002C ÷ 0061 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u0061\u002C", new String[] { "\u0031", "\u0061" }); - // ÷ 0031 ÷ 002C × 0308 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0031 ÷ 002C × 0308 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u0061\u002C", new String[] { "\u0031", "\u0061" }); - // ÷ 0031 × 002C × 0031 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] COMMA (MidNum) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0031 × 002C × 0031 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] COMMA (MidNum) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u0031\u003A", new String[] { "\u0031\u002C\u0031" }); - // ÷ 0031 × 002C × 0308 × 0031 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0031 × 002C × 0308 × 0031 
÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u0031\u003A", new String[] { "\u0031\u002C\u0308\u0031" }); - // ÷ 0031 × 002C × 0031 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] COMMA (MidNum) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0031 × 002C × 0031 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] COMMA (MidNum) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u0031\u0027", new String[] { "\u0031\u002C\u0031" }); - // ÷ 0031 × 002C × 0308 × 0031 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0031 × 002C × 0308 × 0031 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u0031\u0027", new String[] { "\u0031\u002C\u0308\u0031" }); - // ÷ 0031 × 002C × 0031 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] COMMA (MidNum) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0031 × 002C × 0031 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] COMMA (MidNum) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u0031\u002C", new String[] { "\u0031\u002C\u0031" }); - // ÷ 0031 × 002C × 0308 × 0031 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0031 × 002C × 0308 × 0031 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u0031\u002C", new String[] { "\u0031\u002C\u0308\u0031" }); - // ÷ 0031 × 002C × 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] COMMA (MidNum) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0031 × 002C × 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] COMMA (MidNum) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u0031\u002E\u2060", new String[] { "\u0031\u002C\u0031" }); - // ÷ 0031 × 002C × 0308 × 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0031 × 002C × 0308 × 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] COMMA (MidNum) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002C\u0308\u0031\u002E\u2060", new String[] { "\u0031\u002C\u0308\u0031" }); - // ÷ 0031 ÷ 002E × 2060 ÷ 0001 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] (Other) ÷ [0.3] + // ÷ 0031 ÷ 002E × 2060 ÷ 0001 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] (Other) ÷ [0.3] assertAnalyzesTo(analyzer, 
"\u0031\u002E\u2060\u0001", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 0001 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] (Other) ÷ [0.3] + // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 0001 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] (Other) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u0001", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002E × 2060 ÷ 000D ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [3.2] (CR) ÷ [0.3] + // ÷ 0031 ÷ 002E × 2060 ÷ 000D ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [3.2] (CR) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\r", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 000D ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (CR) ÷ [0.3] + // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 000D ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (CR) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\r", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002E × 2060 ÷ 000A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [3.2] (LF) ÷ [0.3] + // ÷ 0031 ÷ 002E × 2060 ÷ 000A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [3.2] (LF) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\n", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 000A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (LF) ÷ [0.3] + // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 000A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (LF) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\n", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002E × 2060 ÷ 000B ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [3.2] (Newline) ÷ [0.3] + // ÷ 0031 ÷ 002E × 2060 ÷ 000B ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [3.2] (Newline) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u000B", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 000B ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (Newline) ÷ [0.3] + // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 000B ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [3.2] (Newline) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u000B", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002E × 2060 ÷ 3031 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3] + // ÷ 0031 ÷ 002E × 2060 ÷ 3031 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3] assertAnalyzesTo(analyzer, 
"\u0031\u002E\u2060\u3031", new String[] { "\u0031", "\u3031" }); - // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 3031 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3] + // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 3031 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] VERTICAL KANA REPEAT MARK (Katakana) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u3031", new String[] { "\u0031", "\u3031" }); - // ÷ 0031 ÷ 002E × 2060 ÷ 0041 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3] + // ÷ 0031 ÷ 002E × 2060 ÷ 0041 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0041", new String[] { "\u0031", "\u0041" }); - // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 0041 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3] + // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 0041 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN CAPITAL LETTER A (ALetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u0041", new String[] { "\u0031", "\u0041" }); - // ÷ 0031 ÷ 002E × 2060 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0031 ÷ 002E × 2060 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u003A", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u003A", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002E × 2060 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0031 ÷ 002E × 2060 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u002C", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u002C", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002E × 2060 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL 
STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0031 ÷ 002E × 2060 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0027", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u0027", new String[] { "\u0031" }); - // ÷ 0031 × 002E × 2060 × 0030 ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [11.0] DIGIT ZERO (Numeric) ÷ [0.3] + // ÷ 0031 × 002E × 2060 × 0030 ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [11.0] DIGIT ZERO (Numeric) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0030", new String[] { "\u0031\u002E\u2060\u0030" }); - // ÷ 0031 × 002E × 2060 × 0308 × 0030 ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ZERO (Numeric) ÷ [0.3] + // ÷ 0031 × 002E × 2060 × 0308 × 0030 ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ZERO (Numeric) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u0030", new String[] { "\u0031\u002E\u2060\u0308\u0030" }); - // ÷ 0031 ÷ 002E × 2060 ÷ 005F ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3] + // ÷ 0031 ÷ 002E × 2060 ÷ 005F ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u005F", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 005F ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3] + // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 005F ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LOW LINE (ExtendNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u005F", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002E × 2060 × 00AD ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3] + // ÷ 0031 ÷ 002E × 2060 × 00AD ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u00AD", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002E × 2060 × 0308 × 00AD ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3] + // ÷ 0031 ÷ 002E × 2060 × 0308 × 00AD ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER 
(Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] SOFT HYPHEN (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u00AD", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002E × 2060 × 0300 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] + // ÷ 0031 ÷ 002E × 2060 × 0300 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0300", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002E × 2060 × 0308 × 0300 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] + // ÷ 0031 ÷ 002E × 2060 × 0308 × 0300 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [4.0] COMBINING GRAVE ACCENT (Extend_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u0300", new String[] { "\u0031" }); - // ÷ 0031 ÷ 002E × 2060 ÷ 0061 × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0031 ÷ 002E × 2060 ÷ 0061 × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0061\u2060", new String[] { "\u0031", "\u0061\u2060" }); - // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 0061 × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 0061 × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u0061\u2060", new String[] { "\u0031", "\u0061\u2060" }); - // ÷ 0031 ÷ 002E × 2060 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0031 ÷ 002E × 2060 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0061\u003A", new String[] { "\u0031", "\u0061" }); - // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 0061 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u0061\u003A", new String[] { "\u0031", "\u0061" }); - // ÷ 0031 ÷ 002E × 2060 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] DIGIT 
ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0031 ÷ 002E × 2060 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0061\u0027", new String[] { "\u0031", "\u0061" }); - // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 0061 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u0061\u0027", new String[] { "\u0031", "\u0061" }); - // ÷ 0031 ÷ 002E × 2060 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0031 ÷ 002E × 2060 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0061\u0027\u2060", new String[] { "\u0031", "\u0061" }); - // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 0061 ÷ 0027 × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u0061\u0027\u2060", new String[] { "\u0031", "\u0061" }); - // ÷ 0031 ÷ 002E × 2060 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0031 ÷ 002E × 2060 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0061\u002C", new String[] { "\u0031", "\u0061" }); - // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0031 ÷ 002E × 2060 × 0308 ÷ 0061 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u0061\u002C", new 
String[] { "\u0031", "\u0061" }); - // ÷ 0031 × 002E × 2060 × 0031 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0031 × 002E × 2060 × 0031 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0031\u003A", new String[] { "\u0031\u002E\u2060\u0031" }); - // ÷ 0031 × 002E × 2060 × 0308 × 0031 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] + // ÷ 0031 × 002E × 2060 × 0308 × 0031 ÷ 003A ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] COLON (MidLetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u0031\u003A", new String[] { "\u0031\u002E\u2060\u0308\u0031" }); - // ÷ 0031 × 002E × 2060 × 0031 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0031 × 002E × 2060 × 0031 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0031\u0027", new String[] { "\u0031\u002E\u2060\u0031" }); - // ÷ 0031 × 002E × 2060 × 0308 × 0031 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] + // ÷ 0031 × 002E × 2060 × 0308 × 0031 ÷ 0027 ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] APOSTROPHE (MidNumLet) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u0031\u0027", new String[] { "\u0031\u002E\u2060\u0308\u0031" }); - // ÷ 0031 × 002E × 2060 × 0031 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0031 × 002E × 2060 × 0031 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0031\u002C", new String[] { "\u0031\u002E\u2060\u0031" }); - // ÷ 0031 × 002E × 2060 × 0308 × 0031 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] + // ÷ 0031 × 002E × 2060 × 0308 × 0031 ÷ 002C ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] COMMA (MidNum) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u0031\u002C", new String[] { "\u0031\u002E\u2060\u0308\u0031" }); - // ÷ 0031 × 002E × 2060 × 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [11.0] 
DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0031 × 002E × 2060 × 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0031\u002E\u2060", new String[] { "\u0031\u002E\u2060\u0031" }); - // ÷ 0031 × 002E × 2060 × 0308 × 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 0031 × 002E × 2060 × 0308 × 0031 ÷ 002E × 2060 ÷ # ÷ [0.2] DIGIT ONE (Numeric) × [12.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [4.0] COMBINING DIAERESIS (Extend_FE) × [11.0] DIGIT ONE (Numeric) ÷ [999.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0031\u002E\u2060\u0308\u0031\u002E\u2060", new String[] { "\u0031\u002E\u2060\u0308\u0031" }); - // ÷ 0063 × 0061 × 006E × 0027 × 0074 ÷ # ÷ [0.2] LATIN SMALL LETTER C (ALetter) × [5.0] LATIN SMALL LETTER A (ALetter) × [5.0] LATIN SMALL LETTER N (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [7.0] LATIN SMALL LETTER T (ALetter) ÷ [0.3] + // ÷ 0063 × 0061 × 006E × 0027 × 0074 ÷ # ÷ [0.2] LATIN SMALL LETTER C (ALetter) × [5.0] LATIN SMALL LETTER A (ALetter) × [5.0] LATIN SMALL LETTER N (ALetter) × [6.0] APOSTROPHE (MidNumLet) × [7.0] LATIN SMALL LETTER T (ALetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0063\u0061\u006E\u0027\u0074", new String[] { "\u0063\u0061\u006E\u0027\u0074" }); - // ÷ 0063 × 0061 × 006E × 2019 × 0074 ÷ # ÷ [0.2] LATIN SMALL LETTER C (ALetter) × [5.0] LATIN SMALL LETTER A (ALetter) × [5.0] LATIN SMALL LETTER N (ALetter) × [6.0] RIGHT SINGLE QUOTATION MARK (MidNumLet) × [7.0] LATIN SMALL LETTER T (ALetter) ÷ [0.3] + // ÷ 0063 × 0061 × 006E × 2019 × 0074 ÷ # ÷ [0.2] LATIN SMALL LETTER C (ALetter) × [5.0] LATIN SMALL LETTER A (ALetter) × [5.0] LATIN SMALL LETTER N (ALetter) × [6.0] RIGHT SINGLE QUOTATION MARK (MidNumLet) × [7.0] LATIN SMALL LETTER T (ALetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0063\u0061\u006E\u2019\u0074", new String[] { "\u0063\u0061\u006E\u2019\u0074" }); - // ÷ 0061 × 0062 × 00AD × 0062 × 0079 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [5.0] LATIN SMALL LETTER B (ALetter) × [4.0] SOFT HYPHEN (Format_FE) × [5.0] LATIN SMALL LETTER B (ALetter) × [5.0] LATIN SMALL LETTER Y (ALetter) ÷ [0.3] + // ÷ 0061 × 0062 × 00AD × 0062 × 0079 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) × [5.0] LATIN SMALL LETTER B (ALetter) × [4.0] SOFT HYPHEN (Format_FE) × [5.0] LATIN SMALL LETTER B (ALetter) × [5.0] LATIN SMALL LETTER Y (ALetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u0062\u00AD\u0062\u0079", new String[] { "\u0061\u0062\u00AD\u0062\u0079" }); - // ÷ 0061 ÷ 0024 ÷ 002D ÷ 0033 × 0034 × 002C × 0035 × 0036 × 0037 × 002E × 0031 × 0034 ÷ 0025 ÷ 0062 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] DOLLAR SIGN (Other) ÷ [999.0] HYPHEN-MINUS (Other) ÷ [999.0] DIGIT THREE (Numeric) × [8.0] DIGIT FOUR (Numeric) × [12.0] COMMA (MidNum) × [11.0] DIGIT FIVE (Numeric) × [8.0] DIGIT SIX (Numeric) × [8.0] DIGIT SEVEN (Numeric) × [12.0] FULL STOP (MidNumLet) × [11.0] DIGIT ONE (Numeric) × [8.0] DIGIT FOUR (Numeric) ÷ [999.0] PERCENT SIGN (Other) ÷ [999.0] LATIN SMALL LETTER B (ALetter) ÷ [0.3] + // ÷ 0061 ÷ 0024 ÷ 002D ÷ 0033 × 0034 × 
002C × 0035 × 0036 × 0037 × 002E × 0031 × 0034 ÷ 0025 ÷ 0062 ÷ # ÷ [0.2] LATIN SMALL LETTER A (ALetter) ÷ [999.0] DOLLAR SIGN (Other) ÷ [999.0] HYPHEN-MINUS (Other) ÷ [999.0] DIGIT THREE (Numeric) × [8.0] DIGIT FOUR (Numeric) × [12.0] COMMA (MidNum) × [11.0] DIGIT FIVE (Numeric) × [8.0] DIGIT SIX (Numeric) × [8.0] DIGIT SEVEN (Numeric) × [12.0] FULL STOP (MidNumLet) × [11.0] DIGIT ONE (Numeric) × [8.0] DIGIT FOUR (Numeric) ÷ [999.0] PERCENT SIGN (Other) ÷ [999.0] LATIN SMALL LETTER B (ALetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0061\u0024\u002D\u0033\u0034\u002C\u0035\u0036\u0037\u002E\u0031\u0034\u0025\u0062", new String[] { "\u0061", "\u0033\u0034\u002C\u0035\u0036\u0037\u002E\u0031\u0034", "\u0062" }); - // ÷ 0033 × 0061 ÷ # ÷ [0.2] DIGIT THREE (Numeric) × [10.0] LATIN SMALL LETTER A (ALetter) ÷ [0.3] + // ÷ 0033 × 0061 ÷ # ÷ [0.2] DIGIT THREE (Numeric) × [10.0] LATIN SMALL LETTER A (ALetter) ÷ [0.3] assertAnalyzesTo(analyzer, "\u0033\u0061", new String[] { "\u0033\u0061" }); - // ÷ 2060 ÷ 0063 × 2060 × 0061 × 2060 × 006E × 2060 × 0027 × 2060 × 0074 × 2060 × 2060 ÷ # ÷ [0.2] WORD JOINER (Format_FE) ÷ [999.0] LATIN SMALL LETTER C (ALetter) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN SMALL LETTER N (ALetter) × [4.0] WORD JOINER (Format_FE) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [7.0] LATIN SMALL LETTER T (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 2060 ÷ 0063 × 2060 × 0061 × 2060 × 006E × 2060 × 0027 × 2060 × 0074 × 2060 × 2060 ÷ # ÷ [0.2] WORD JOINER (Format_FE) ÷ [999.0] LATIN SMALL LETTER C (ALetter) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN SMALL LETTER N (ALetter) × [4.0] WORD JOINER (Format_FE) × [6.0] APOSTROPHE (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [7.0] LATIN SMALL LETTER T (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u2060\u0063\u2060\u0061\u2060\u006E\u2060\u0027\u2060\u0074\u2060\u2060", new String[] { "\u0063\u2060\u0061\u2060\u006E\u2060\u0027\u2060\u0074\u2060\u2060" }); - // ÷ 2060 ÷ 0063 × 2060 × 0061 × 2060 × 006E × 2060 × 2019 × 2060 × 0074 × 2060 × 2060 ÷ # ÷ [0.2] WORD JOINER (Format_FE) ÷ [999.0] LATIN SMALL LETTER C (ALetter) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN SMALL LETTER N (ALetter) × [4.0] WORD JOINER (Format_FE) × [6.0] RIGHT SINGLE QUOTATION MARK (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [7.0] LATIN SMALL LETTER T (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 2060 ÷ 0063 × 2060 × 0061 × 2060 × 006E × 2060 × 2019 × 2060 × 0074 × 2060 × 2060 ÷ # ÷ [0.2] WORD JOINER (Format_FE) ÷ [999.0] LATIN SMALL LETTER C (ALetter) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN SMALL LETTER N (ALetter) × [4.0] WORD JOINER (Format_FE) × [6.0] RIGHT SINGLE QUOTATION MARK (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [7.0] LATIN SMALL LETTER T (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u2060\u0063\u2060\u0061\u2060\u006E\u2060\u2019\u2060\u0074\u2060\u2060", new String[] { "\u0063\u2060\u0061\u2060\u006E\u2060\u2019\u2060\u0074\u2060\u2060" }); - // ÷ 2060 ÷ 0061 × 2060 × 0062 × 2060 × 00AD × 2060 × 0062 × 2060 × 0079 × 2060 × 
2060 ÷ # ÷ [0.2] WORD JOINER (Format_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN SMALL LETTER B (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] SOFT HYPHEN (Format_FE) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN SMALL LETTER B (ALetter) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN SMALL LETTER Y (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 2060 ÷ 0061 × 2060 × 0062 × 2060 × 00AD × 2060 × 0062 × 2060 × 0079 × 2060 × 2060 ÷ # ÷ [0.2] WORD JOINER (Format_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN SMALL LETTER B (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] SOFT HYPHEN (Format_FE) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN SMALL LETTER B (ALetter) × [4.0] WORD JOINER (Format_FE) × [5.0] LATIN SMALL LETTER Y (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u2060\u0061\u2060\u0062\u2060\u00AD\u2060\u0062\u2060\u0079\u2060\u2060", new String[] { "\u0061\u2060\u0062\u2060\u00AD\u2060\u0062\u2060\u0079\u2060\u2060" }); - // ÷ 2060 ÷ 0061 × 2060 ÷ 0024 × 2060 ÷ 002D × 2060 ÷ 0033 × 2060 × 0034 × 2060 × 002C × 2060 × 0035 × 2060 × 0036 × 2060 × 0037 × 2060 × 002E × 2060 × 0031 × 2060 × 0034 × 2060 ÷ 0025 × 2060 ÷ 0062 × 2060 × 2060 ÷ # ÷ [0.2] WORD JOINER (Format_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] DOLLAR SIGN (Other) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] HYPHEN-MINUS (Other) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] DIGIT THREE (Numeric) × [4.0] WORD JOINER (Format_FE) × [8.0] DIGIT FOUR (Numeric) × [4.0] WORD JOINER (Format_FE) × [12.0] COMMA (MidNum) × [4.0] WORD JOINER (Format_FE) × [11.0] DIGIT FIVE (Numeric) × [4.0] WORD JOINER (Format_FE) × [8.0] DIGIT SIX (Numeric) × [4.0] WORD JOINER (Format_FE) × [8.0] DIGIT SEVEN (Numeric) × [4.0] WORD JOINER (Format_FE) × [12.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [11.0] DIGIT ONE (Numeric) × [4.0] WORD JOINER (Format_FE) × [8.0] DIGIT FOUR (Numeric) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] PERCENT SIGN (Other) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] LATIN SMALL LETTER B (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 2060 ÷ 0061 × 2060 ÷ 0024 × 2060 ÷ 002D × 2060 ÷ 0033 × 2060 × 0034 × 2060 × 002C × 2060 × 0035 × 2060 × 0036 × 2060 × 0037 × 2060 × 002E × 2060 × 0031 × 2060 × 0034 × 2060 ÷ 0025 × 2060 ÷ 0062 × 2060 × 2060 ÷ # ÷ [0.2] WORD JOINER (Format_FE) ÷ [999.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] DOLLAR SIGN (Other) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] HYPHEN-MINUS (Other) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] DIGIT THREE (Numeric) × [4.0] WORD JOINER (Format_FE) × [8.0] DIGIT FOUR (Numeric) × [4.0] WORD JOINER (Format_FE) × [12.0] COMMA (MidNum) × [4.0] WORD JOINER (Format_FE) × [11.0] DIGIT FIVE (Numeric) × [4.0] WORD JOINER (Format_FE) × [8.0] DIGIT SIX (Numeric) × [4.0] WORD JOINER (Format_FE) × [8.0] DIGIT SEVEN (Numeric) × [4.0] WORD JOINER (Format_FE) × [12.0] FULL STOP (MidNumLet) × [4.0] WORD JOINER (Format_FE) × [11.0] DIGIT ONE (Numeric) × [4.0] WORD JOINER (Format_FE) × [8.0] DIGIT FOUR (Numeric) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] PERCENT SIGN (Other) × [4.0] WORD JOINER (Format_FE) ÷ [999.0] LATIN SMALL LETTER B (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, 
"\u2060\u0061\u2060\u0024\u2060\u002D\u2060\u0033\u2060\u0034\u2060\u002C\u2060\u0035\u2060\u0036\u2060\u0037\u2060\u002E\u2060\u0031\u2060\u0034\u2060\u0025\u2060\u0062\u2060\u2060", new String[] { "\u0061\u2060", "\u0033\u2060\u0034\u2060\u002C\u2060\u0035\u2060\u0036\u2060\u0037\u2060\u002E\u2060\u0031\u2060\u0034\u2060", "\u0062\u2060\u2060" }); - // ÷ 2060 ÷ 0033 × 2060 × 0061 × 2060 × 2060 ÷ # ÷ [0.2] WORD JOINER (Format_FE) ÷ [999.0] DIGIT THREE (Numeric) × [4.0] WORD JOINER (Format_FE) × [10.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] + // ÷ 2060 ÷ 0033 × 2060 × 0061 × 2060 × 2060 ÷ # ÷ [0.2] WORD JOINER (Format_FE) ÷ [999.0] DIGIT THREE (Numeric) × [4.0] WORD JOINER (Format_FE) × [10.0] LATIN SMALL LETTER A (ALetter) × [4.0] WORD JOINER (Format_FE) × [4.0] WORD JOINER (Format_FE) ÷ [0.3] assertAnalyzesTo(analyzer, "\u2060\u0033\u2060\u0061\u2060\u2060", new String[] { "\u0033\u2060\u0061\u2060\u2060" }); Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestHyphenatedWordsFilter.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestHyphenatedWordsFilter.java (revision 1381159) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestHyphenatedWordsFilter.java (working copy) @@ -32,37 +32,37 @@ * HyphenatedWordsFilter test */ public class TestHyphenatedWordsFilter extends BaseTokenStreamTestCase { - public void testHyphenatedWords() throws Exception { - String input = "ecologi-\r\ncal devel-\r\n\r\nop compre-\u0009hensive-hands-on and ecologi-\ncal"; - // first test - TokenStream ts = new MockTokenizer(new StringReader(input), MockTokenizer.WHITESPACE, false); - ts = new HyphenatedWordsFilter(ts); - assertTokenStreamContents(ts, - new String[] { "ecological", "develop", "comprehensive-hands-on", "and", "ecological" }); - } - - /** - * Test that HyphenatedWordsFilter behaves correctly with a final hyphen - */ - public void testHyphenAtEnd() throws Exception { - String input = "ecologi-\r\ncal devel-\r\n\r\nop compre-\u0009hensive-hands-on and ecology-"; - // first test - TokenStream ts = new MockTokenizer(new StringReader(input), MockTokenizer.WHITESPACE, false); - ts = new HyphenatedWordsFilter(ts); - assertTokenStreamContents(ts, - new String[] { "ecological", "develop", "comprehensive-hands-on", "and", "ecology-" }); - } - - public void testOffsets() throws Exception { - String input = "abc- def geh 1234- 5678-"; + public void testHyphenatedWords() throws Exception { + String input = "ecologi-\r\ncal devel-\r\n\r\nop compre-\u0009hensive-hands-on and ecologi-\ncal"; + // first test TokenStream ts = new MockTokenizer(new StringReader(input), MockTokenizer.WHITESPACE, false); ts = new HyphenatedWordsFilter(ts); + assertTokenStreamContents(ts, + new String[] { "ecological", "develop", "comprehensive-hands-on", "and", "ecological" }); + } + + /** + * Test that HyphenatedWordsFilter behaves correctly with a final hyphen + */ + public void testHyphenAtEnd() throws Exception { + String input = "ecologi-\r\ncal devel-\r\n\r\nop compre-\u0009hensive-hands-on and ecology-"; + // first test + TokenStream ts = new MockTokenizer(new StringReader(input), MockTokenizer.WHITESPACE, false); + ts = new HyphenatedWordsFilter(ts); + assertTokenStreamContents(ts, + new String[] { "ecological", "develop", "comprehensive-hands-on", "and", "ecology-" }); + } + + public void testOffsets() 
throws Exception { + String input = "abc- def geh 1234- 5678-"; + TokenStream ts = new MockTokenizer(new StringReader(input), MockTokenizer.WHITESPACE, false); + ts = new HyphenatedWordsFilter(ts); assertTokenStreamContents(ts, new String[] { "abcdef", "geh", "12345678-" }, new int[] { 0, 9, 13 }, new int[] { 8, 12, 24 }); - } - + } + /** blast some random strings through the analyzer */ public void testRandomString() throws Exception { Analyzer a = new Analyzer() { Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/br/TestBrazilianStemmer.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/br/TestBrazilianStemmer.java (revision 1381159) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/br/TestBrazilianStemmer.java (working copy) @@ -38,87 +38,87 @@ public class TestBrazilianStemmer extends BaseTokenStreamTestCase { public void testWithSnowballExamples() throws Exception { - check("boa", "boa"); - check("boainain", "boainain"); - check("boas", "boas"); - check("bôas", "boas"); // removes diacritic: different from snowball portugese - check("boassu", "boassu"); - check("boataria", "boat"); - check("boate", "boat"); - check("boates", "boat"); - check("boatos", "boat"); - check("bob", "bob"); - check("boba", "bob"); - check("bobagem", "bobag"); - check("bobagens", "bobagens"); - check("bobalhões", "bobalho"); // removes diacritic: different from snowball portugese - check("bobear", "bob"); - check("bobeira", "bobeir"); - check("bobinho", "bobinh"); - check("bobinhos", "bobinh"); - check("bobo", "bob"); - check("bobs", "bobs"); - check("boca", "boc"); - check("bocadas", "boc"); - check("bocadinho", "bocadinh"); - check("bocado", "boc"); - check("bocaiúva", "bocaiuv"); // removes diacritic: different from snowball portuguese - check("boçal", "bocal"); // removes diacritic: different from snowball portuguese - check("bocarra", "bocarr"); - check("bocas", "boc"); - check("bode", "bod"); - check("bodoque", "bodoqu"); - check("body", "body"); - check("boeing", "boeing"); - check("boem", "boem"); - check("boemia", "boem"); - check("boêmio", "boemi"); // removes diacritic: different from snowball portuguese - check("bogotá", "bogot"); - check("boi", "boi"); - check("bóia", "boi"); // removes diacritic: different from snowball portuguese - check("boiando", "boi"); - check("quiabo", "quiab"); - check("quicaram", "quic"); - check("quickly", "quickly"); - check("quieto", "quiet"); - check("quietos", "quiet"); - check("quilate", "quilat"); - check("quilates", "quilat"); - check("quilinhos", "quilinh"); - check("quilo", "quil"); - check("quilombo", "quilomb"); - check("quilométricas", "quilometr"); // removes diacritic: different from snowball portuguese - check("quilométricos", "quilometr"); // removes diacritic: different from snowball portuguese - check("quilômetro", "quilometr"); // removes diacritic: different from snowball portoguese - check("quilômetros", "quilometr"); // removes diacritic: different from snowball portoguese - check("quilos", "quil"); - check("quimica", "quimic"); - check("quilos", "quil"); - check("quimica", "quimic"); - check("quimicas", "quimic"); - check("quimico", "quimic"); - check("quimicos", "quimic"); - check("quimioterapia", "quimioterap"); - check("quimioterápicos", "quimioterap"); // removes diacritic: different from snowball portoguese - check("quimono", "quimon"); - check("quincas", "quinc"); - check("quinhão", "quinha"); // removes diacritic: different from 
snowball portoguese - check("quinhentos", "quinhent"); - check("quinn", "quinn"); - check("quino", "quin"); - check("quinta", "quint"); - check("quintal", "quintal"); - check("quintana", "quintan"); - check("quintanilha", "quintanilh"); - check("quintão", "quinta"); // removes diacritic: different from snowball portoguese - check("quintessência", "quintessente"); // versus snowball portuguese 'quintessent' - check("quintino", "quintin"); - check("quinto", "quint"); - check("quintos", "quint"); - check("quintuplicou", "quintuplic"); - check("quinze", "quinz"); - check("quinzena", "quinzen"); - check("quiosque", "quiosqu"); + check("boa", "boa"); + check("boainain", "boainain"); + check("boas", "boas"); + check("bôas", "boas"); // removes diacritic: different from snowball portugese + check("boassu", "boassu"); + check("boataria", "boat"); + check("boate", "boat"); + check("boates", "boat"); + check("boatos", "boat"); + check("bob", "bob"); + check("boba", "bob"); + check("bobagem", "bobag"); + check("bobagens", "bobagens"); + check("bobalhões", "bobalho"); // removes diacritic: different from snowball portugese + check("bobear", "bob"); + check("bobeira", "bobeir"); + check("bobinho", "bobinh"); + check("bobinhos", "bobinh"); + check("bobo", "bob"); + check("bobs", "bobs"); + check("boca", "boc"); + check("bocadas", "boc"); + check("bocadinho", "bocadinh"); + check("bocado", "boc"); + check("bocaiúva", "bocaiuv"); // removes diacritic: different from snowball portuguese + check("boçal", "bocal"); // removes diacritic: different from snowball portuguese + check("bocarra", "bocarr"); + check("bocas", "boc"); + check("bode", "bod"); + check("bodoque", "bodoqu"); + check("body", "body"); + check("boeing", "boeing"); + check("boem", "boem"); + check("boemia", "boem"); + check("boêmio", "boemi"); // removes diacritic: different from snowball portuguese + check("bogotá", "bogot"); + check("boi", "boi"); + check("bóia", "boi"); // removes diacritic: different from snowball portuguese + check("boiando", "boi"); + check("quiabo", "quiab"); + check("quicaram", "quic"); + check("quickly", "quickly"); + check("quieto", "quiet"); + check("quietos", "quiet"); + check("quilate", "quilat"); + check("quilates", "quilat"); + check("quilinhos", "quilinh"); + check("quilo", "quil"); + check("quilombo", "quilomb"); + check("quilométricas", "quilometr"); // removes diacritic: different from snowball portuguese + check("quilométricos", "quilometr"); // removes diacritic: different from snowball portuguese + check("quilômetro", "quilometr"); // removes diacritic: different from snowball portoguese + check("quilômetros", "quilometr"); // removes diacritic: different from snowball portoguese + check("quilos", "quil"); + check("quimica", "quimic"); + check("quilos", "quil"); + check("quimica", "quimic"); + check("quimicas", "quimic"); + check("quimico", "quimic"); + check("quimicos", "quimic"); + check("quimioterapia", "quimioterap"); + check("quimioterápicos", "quimioterap"); // removes diacritic: different from snowball portoguese + check("quimono", "quimon"); + check("quincas", "quinc"); + check("quinhão", "quinha"); // removes diacritic: different from snowball portoguese + check("quinhentos", "quinhent"); + check("quinn", "quinn"); + check("quino", "quin"); + check("quinta", "quint"); + check("quintal", "quintal"); + check("quintana", "quintan"); + check("quintanilha", "quintanilh"); + check("quintão", "quinta"); // removes diacritic: different from snowball portoguese + check("quintessência", "quintessente"); // 
versus snowball portuguese 'quintessent' + check("quintino", "quintin"); + check("quinto", "quint"); + check("quintos", "quint"); + check("quintuplicou", "quintuplic"); + check("quinze", "quinz"); + check("quinzena", "quinzen"); + check("quiosque", "quiosqu"); } public void testNormalization() throws Exception { Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchAnalyzer.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchAnalyzer.java (revision 1381159) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchAnalyzer.java (working copy) @@ -31,181 +31,181 @@ public class TestFrenchAnalyzer extends BaseTokenStreamTestCase { - public void testAnalyzer() throws Exception { - FrenchAnalyzer fa = new FrenchAnalyzer(TEST_VERSION_CURRENT); - - assertAnalyzesTo(fa, "", new String[] { - }); + public void testAnalyzer() throws Exception { + FrenchAnalyzer fa = new FrenchAnalyzer(TEST_VERSION_CURRENT); - assertAnalyzesTo( - fa, - "chien chat cheval", - new String[] { "chien", "chat", "cheval" }); + assertAnalyzesTo(fa, "", new String[] { + }); - assertAnalyzesTo( - fa, - "chien CHAT CHEVAL", - new String[] { "chien", "chat", "cheval" }); + assertAnalyzesTo( + fa, + "chien chat cheval", + new String[] { "chien", "chat", "cheval" }); - assertAnalyzesTo( - fa, - " chien ,? + = - CHAT /: > CHEVAL", - new String[] { "chien", "chat", "cheval" }); + assertAnalyzesTo( + fa, + "chien CHAT CHEVAL", + new String[] { "chien", "chat", "cheval" }); - assertAnalyzesTo(fa, "chien++", new String[] { "chien" }); + assertAnalyzesTo( + fa, + " chien ,? + = - CHAT /: > CHEVAL", + new String[] { "chien", "chat", "cheval" }); - assertAnalyzesTo( - fa, - "mot \"entreguillemet\"", - new String[] { "mot", "entreguilemet" }); + assertAnalyzesTo(fa, "chien++", new String[] { "chien" }); - // let's do some french specific tests now + assertAnalyzesTo( + fa, + "mot \"entreguillemet\"", + new String[] { "mot", "entreguilemet" }); - /* 1. couldn't resist - I would expect this to stay one term as in French the minus - sign is often used for composing words */ - assertAnalyzesTo( - fa, - "Jean-François", - new String[] { "jean", "francoi" }); + // let's do some french specific tests now - // 2. stopwords - assertAnalyzesTo( - fa, - "le la chien les aux chat du des à cheval", - new String[] { "chien", "chat", "cheval" }); + /* 1. couldn't resist + I would expect this to stay one term as in French the minus + sign is often used for composing words */ + assertAnalyzesTo( + fa, + "Jean-François", + new String[] { "jean", "francoi" }); - // some nouns and adjectives - assertAnalyzesTo( - fa, - "lances chismes habitable chiste éléments captifs", - new String[] { - "lanc", - "chism", - "habitabl", - "chist", - "element", - "captif" }); + // 2. 
stopwords + assertAnalyzesTo( + fa, + "le la chien les aux chat du des à cheval", + new String[] { "chien", "chat", "cheval" }); - // some verbs - assertAnalyzesTo( - fa, - "finissions souffrirent rugissante", - new String[] { "finision", "soufrirent", "rugisant" }); + // some nouns and adjectives + assertAnalyzesTo( + fa, + "lances chismes habitable chiste éléments captifs", + new String[] { + "lanc", + "chism", + "habitabl", + "chist", + "element", + "captif" }); - // some everything else - // aujourd'hui stays one term which is OK - assertAnalyzesTo( - fa, - "C3PO aujourd'hui oeuf ïâöûàä anticonstitutionnellement Java++ ", - new String[] { - "c3po", - "aujourd'hui", - "oeuf", - "ïaöuaä", - "anticonstitutionel", - "java" }); + // some verbs + assertAnalyzesTo( + fa, + "finissions souffrirent rugissante", + new String[] { "finision", "soufrirent", "rugisant" }); - // some more everything else - // here 1940-1945 stays as one term, 1940:1945 not ? - assertAnalyzesTo( - fa, - "33Bis 1940-1945 1940:1945 (---i+++)*", - new String[] { "33bi", "1940", "1945", "1940", "1945", "i" }); + // some everything else + // aujourd'hui stays one term which is OK + assertAnalyzesTo( + fa, + "C3PO aujourd'hui oeuf ïâöûàä anticonstitutionnellement Java++ ", + new String[] { + "c3po", + "aujourd'hui", + "oeuf", + "ïaöuaä", + "anticonstitutionel", + "java" }); - } - - /** - * @deprecated (3.1) remove this test for Lucene 5.0 - */ - @Deprecated - public void testAnalyzer30() throws Exception { - FrenchAnalyzer fa = new FrenchAnalyzer(Version.LUCENE_30); - - assertAnalyzesTo(fa, "", new String[] { - }); + // some more everything else + // here 1940-1945 stays as one term, 1940:1945 not ? + assertAnalyzesTo( + fa, + "33Bis 1940-1945 1940:1945 (---i+++)*", + new String[] { "33bi", "1940", "1945", "1940", "1945", "i" }); - assertAnalyzesTo( - fa, - "chien chat cheval", - new String[] { "chien", "chat", "cheval" }); + } - assertAnalyzesTo( - fa, - "chien CHAT CHEVAL", - new String[] { "chien", "chat", "cheval" }); + /** + * @deprecated (3.1) remove this test for Lucene 5.0 + */ + @Deprecated + public void testAnalyzer30() throws Exception { + FrenchAnalyzer fa = new FrenchAnalyzer(Version.LUCENE_30); - assertAnalyzesTo( - fa, - " chien ,? + = - CHAT /: > CHEVAL", - new String[] { "chien", "chat", "cheval" }); + assertAnalyzesTo(fa, "", new String[] { + }); - assertAnalyzesTo(fa, "chien++", new String[] { "chien" }); + assertAnalyzesTo( + fa, + "chien chat cheval", + new String[] { "chien", "chat", "cheval" }); - assertAnalyzesTo( - fa, - "mot \"entreguillemet\"", - new String[] { "mot", "entreguillemet" }); + assertAnalyzesTo( + fa, + "chien CHAT CHEVAL", + new String[] { "chien", "chat", "cheval" }); - // let's do some french specific tests now + assertAnalyzesTo( + fa, + " chien ,? + = - CHAT /: > CHEVAL", + new String[] { "chien", "chat", "cheval" }); - /* 1. couldn't resist - I would expect this to stay one term as in French the minus - sign is often used for composing words */ - assertAnalyzesTo( - fa, - "Jean-François", - new String[] { "jean", "françois" }); + assertAnalyzesTo(fa, "chien++", new String[] { "chien" }); - // 2. 
stopwords - assertAnalyzesTo( - fa, - "le la chien les aux chat du des à cheval", - new String[] { "chien", "chat", "cheval" }); + assertAnalyzesTo( + fa, + "mot \"entreguillemet\"", + new String[] { "mot", "entreguillemet" }); - // some nouns and adjectives - assertAnalyzesTo( - fa, - "lances chismes habitable chiste éléments captifs", - new String[] { - "lanc", - "chism", - "habit", - "chist", - "élément", - "captif" }); + // let's do some french specific tests now - // some verbs - assertAnalyzesTo( - fa, - "finissions souffrirent rugissante", - new String[] { "fin", "souffr", "rug" }); + /* 1. couldn't resist + I would expect this to stay one term as in French the minus + sign is often used for composing words */ + assertAnalyzesTo( + fa, + "Jean-François", + new String[] { "jean", "françois" }); - // some everything else - // aujourd'hui stays one term which is OK - assertAnalyzesTo( - fa, - "C3PO aujourd'hui oeuf ïâöûàä anticonstitutionnellement Java++ ", - new String[] { - "c3po", - "aujourd'hui", - "oeuf", - "ïâöûàä", - "anticonstitutionnel", - "jav" }); + // 2. stopwords + assertAnalyzesTo( + fa, + "le la chien les aux chat du des à cheval", + new String[] { "chien", "chat", "cheval" }); - // some more everything else - // here 1940-1945 stays as one term, 1940:1945 not ? - assertAnalyzesTo( - fa, - "33Bis 1940-1945 1940:1945 (---i+++)*", - new String[] { "33bis", "1940-1945", "1940", "1945", "i" }); + // some nouns and adjectives + assertAnalyzesTo( + fa, + "lances chismes habitable chiste éléments captifs", + new String[] { + "lanc", + "chism", + "habit", + "chist", + "élément", + "captif" }); - } - - public void testReusableTokenStream() throws Exception { - FrenchAnalyzer fa = new FrenchAnalyzer(TEST_VERSION_CURRENT); - // stopwords + // some verbs + assertAnalyzesTo( + fa, + "finissions souffrirent rugissante", + new String[] { "fin", "souffr", "rug" }); + + // some everything else + // aujourd'hui stays one term which is OK + assertAnalyzesTo( + fa, + "C3PO aujourd'hui oeuf ïâöûàä anticonstitutionnellement Java++ ", + new String[] { + "c3po", + "aujourd'hui", + "oeuf", + "ïâöûàä", + "anticonstitutionnel", + "jav" }); + + // some more everything else + // here 1940-1945 stays as one term, 1940:1945 not ? 
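// A note on the idiom used throughout these test hunks: every check goes through
// BaseTokenStreamTestCase.assertAnalyzesTo, which runs the analyzer over the input string
// and compares the resulting token stream with the expected term array (other overloads in
// these files also verify offsets, token types and position increments). A minimal sketch
// of the idiom, reusing the stopword example from this file; illustrative only, not part
// of the patch:
//
//   FrenchAnalyzer fa = new FrenchAnalyzer(TEST_VERSION_CURRENT);
//   assertAnalyzesTo(fa, "le la chien les aux chat du des à cheval",
//       new String[] { "chien", "chat", "cheval" }); // stopwords dropped, terms normalized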
+ assertAnalyzesTo( + fa, + "33Bis 1940-1945 1940:1945 (---i+++)*", + new String[] { "33bis", "1940-1945", "1940", "1945", "i" }); + + } + + public void testReusableTokenStream() throws Exception { + FrenchAnalyzer fa = new FrenchAnalyzer(TEST_VERSION_CURRENT); + // stopwords assertAnalyzesToReuse( fa, "le la chien les aux chat du des à cheval", @@ -222,7 +222,7 @@ "chist", "element", "captif" }); - } + } public void testExclusionTableViaCtor() throws Exception { CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true); Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/nl/TestDutchStemmer.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/nl/TestDutchStemmer.java (revision 1381159) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/nl/TestDutchStemmer.java (working copy) @@ -34,83 +34,83 @@ public class TestDutchStemmer extends BaseTokenStreamTestCase { public void testWithSnowballExamples() throws Exception { - check("lichaamsziek", "lichaamsziek"); - check("lichamelijk", "licham"); - check("lichamelijke", "licham"); - check("lichamelijkheden", "licham"); - check("lichamen", "licham"); - check("lichere", "licher"); - check("licht", "licht"); - check("lichtbeeld", "lichtbeeld"); - check("lichtbruin", "lichtbruin"); - check("lichtdoorlatende", "lichtdoorlat"); - check("lichte", "licht"); - check("lichten", "licht"); - check("lichtende", "lichtend"); - check("lichtenvoorde", "lichtenvoord"); - check("lichter", "lichter"); - check("lichtere", "lichter"); - check("lichters", "lichter"); - check("lichtgevoeligheid", "lichtgevoel"); - check("lichtgewicht", "lichtgewicht"); - check("lichtgrijs", "lichtgrijs"); - check("lichthoeveelheid", "lichthoevel"); - check("lichtintensiteit", "lichtintensiteit"); - check("lichtje", "lichtj"); - check("lichtjes", "lichtjes"); - check("lichtkranten", "lichtkrant"); - check("lichtkring", "lichtkring"); - check("lichtkringen", "lichtkring"); - check("lichtregelsystemen", "lichtregelsystem"); - check("lichtste", "lichtst"); - check("lichtstromende", "lichtstrom"); - check("lichtte", "licht"); - check("lichtten", "licht"); - check("lichttoetreding", "lichttoetred"); - check("lichtverontreinigde", "lichtverontreinigd"); - check("lichtzinnige", "lichtzinn"); - check("lid", "lid"); - check("lidia", "lidia"); - check("lidmaatschap", "lidmaatschap"); - check("lidstaten", "lidstat"); - check("lidvereniging", "lidveren"); - check("opgingen", "opging"); - check("opglanzing", "opglanz"); - check("opglanzingen", "opglanz"); - check("opglimlachten", "opglimlacht"); - check("opglimpen", "opglimp"); - check("opglimpende", "opglimp"); - check("opglimping", "opglimp"); - check("opglimpingen", "opglimp"); - check("opgraven", "opgrav"); - check("opgrijnzen", "opgrijnz"); - check("opgrijzende", "opgrijz"); - check("opgroeien", "opgroei"); - check("opgroeiende", "opgroei"); - check("opgroeiplaats", "opgroeiplat"); - check("ophaal", "ophal"); - check("ophaaldienst", "ophaaldienst"); - check("ophaalkosten", "ophaalkost"); - check("ophaalsystemen", "ophaalsystem"); - check("ophaalt", "ophaalt"); - check("ophaaltruck", "ophaaltruck"); - check("ophalen", "ophal"); - check("ophalend", "ophal"); - check("ophalers", "ophaler"); - check("ophef", "ophef"); - check("opheldering", "ophelder"); - check("ophemelde", "ophemeld"); - check("ophemelen", "ophemel"); - check("opheusden", "opheusd"); - check("ophief", "ophief"); - check("ophield", "ophield"); - check("ophieven", 
"ophiev"); - check("ophoepelt", "ophoepelt"); - check("ophoog", "ophog"); - check("ophoogzand", "ophoogzand"); - check("ophopen", "ophop"); - check("ophoping", "ophop"); - check("ophouden", "ophoud"); + check("lichaamsziek", "lichaamsziek"); + check("lichamelijk", "licham"); + check("lichamelijke", "licham"); + check("lichamelijkheden", "licham"); + check("lichamen", "licham"); + check("lichere", "licher"); + check("licht", "licht"); + check("lichtbeeld", "lichtbeeld"); + check("lichtbruin", "lichtbruin"); + check("lichtdoorlatende", "lichtdoorlat"); + check("lichte", "licht"); + check("lichten", "licht"); + check("lichtende", "lichtend"); + check("lichtenvoorde", "lichtenvoord"); + check("lichter", "lichter"); + check("lichtere", "lichter"); + check("lichters", "lichter"); + check("lichtgevoeligheid", "lichtgevoel"); + check("lichtgewicht", "lichtgewicht"); + check("lichtgrijs", "lichtgrijs"); + check("lichthoeveelheid", "lichthoevel"); + check("lichtintensiteit", "lichtintensiteit"); + check("lichtje", "lichtj"); + check("lichtjes", "lichtjes"); + check("lichtkranten", "lichtkrant"); + check("lichtkring", "lichtkring"); + check("lichtkringen", "lichtkring"); + check("lichtregelsystemen", "lichtregelsystem"); + check("lichtste", "lichtst"); + check("lichtstromende", "lichtstrom"); + check("lichtte", "licht"); + check("lichtten", "licht"); + check("lichttoetreding", "lichttoetred"); + check("lichtverontreinigde", "lichtverontreinigd"); + check("lichtzinnige", "lichtzinn"); + check("lid", "lid"); + check("lidia", "lidia"); + check("lidmaatschap", "lidmaatschap"); + check("lidstaten", "lidstat"); + check("lidvereniging", "lidveren"); + check("opgingen", "opging"); + check("opglanzing", "opglanz"); + check("opglanzingen", "opglanz"); + check("opglimlachten", "opglimlacht"); + check("opglimpen", "opglimp"); + check("opglimpende", "opglimp"); + check("opglimping", "opglimp"); + check("opglimpingen", "opglimp"); + check("opgraven", "opgrav"); + check("opgrijnzen", "opgrijnz"); + check("opgrijzende", "opgrijz"); + check("opgroeien", "opgroei"); + check("opgroeiende", "opgroei"); + check("opgroeiplaats", "opgroeiplat"); + check("ophaal", "ophal"); + check("ophaaldienst", "ophaaldienst"); + check("ophaalkosten", "ophaalkost"); + check("ophaalsystemen", "ophaalsystem"); + check("ophaalt", "ophaalt"); + check("ophaaltruck", "ophaaltruck"); + check("ophalen", "ophal"); + check("ophalend", "ophal"); + check("ophalers", "ophaler"); + check("ophef", "ophef"); + check("opheldering", "ophelder"); + check("ophemelde", "ophemeld"); + check("ophemelen", "ophemel"); + check("opheusden", "opheusd"); + check("ophief", "ophief"); + check("ophield", "ophield"); + check("ophieven", "ophiev"); + check("ophoepelt", "ophoepelt"); + check("ophoog", "ophog"); + check("ophoogzand", "ophoogzand"); + check("ophopen", "ophop"); + check("ophoping", "ophop"); + check("ophouden", "ophoud"); } /** Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java (revision 1381159) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java (working copy) @@ -38,38 +38,38 @@ */ public class TestThaiAnalyzer extends BaseTokenStreamTestCase { - + @Override public void setUp() throws Exception { super.setUp(); assumeTrue("JRE does not support Thai dictionary-based BreakIterator", ThaiWordFilter.DBBI_AVAILABLE); } - /* - * 
testcase for offsets - */ - public void testOffsets() throws Exception { - assertAnalyzesTo(new ThaiAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET), "การที่ได้ต้องแสดงว่างานดี", - new String[] { "การ", "ที่", "ได้", "ต้อง", "แสดง", "ว่า", "งาน", "ดี" }, - new int[] { 0, 3, 6, 9, 13, 17, 20, 23 }, - new int[] { 3, 6, 9, 13, 17, 20, 23, 25 }); - } - - public void testStopWords() throws Exception { - assertAnalyzesTo(new ThaiAnalyzer(TEST_VERSION_CURRENT), "การที่ได้ต้องแสดงว่างานดี", - new String[] { "แสดง", "งาน", "ดี" }, - new int[] { 13, 20, 23 }, - new int[] { 17, 23, 25 }, - new int[] { 5, 2, 1 }); - } - - public void testBackwardsStopWords() throws Exception { - assertAnalyzesTo(new ThaiAnalyzer(Version.LUCENE_35), "การที่ได้ต้องแสดงว่างานดี", - new String[] { "การ", "ที่", "ได้", "ต้อง", "แสดง", "ว่า", "งาน", "ดี" }, - new int[] { 0, 3, 6, 9, 13, 17, 20, 23 }, - new int[] { 3, 6, 9, 13, 17, 20, 23, 25 }); - } - - public void testTokenType() throws Exception { + /* + * testcase for offsets + */ + public void testOffsets() throws Exception { + assertAnalyzesTo(new ThaiAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET), "การที่ได้ต้องแสดงว่างานดี", + new String[] { "การ", "ที่", "ได้", "ต้อง", "แสดง", "ว่า", "งาน", "ดี" }, + new int[] { 0, 3, 6, 9, 13, 17, 20, 23 }, + new int[] { 3, 6, 9, 13, 17, 20, 23, 25 }); + } + + public void testStopWords() throws Exception { + assertAnalyzesTo(new ThaiAnalyzer(TEST_VERSION_CURRENT), "การที่ได้ต้องแสดงว่างานดี", + new String[] { "แสดง", "งาน", "ดี" }, + new int[] { 13, 20, 23 }, + new int[] { 17, 23, 25 }, + new int[] { 5, 2, 1 }); + } + + public void testBackwardsStopWords() throws Exception { + assertAnalyzesTo(new ThaiAnalyzer(Version.LUCENE_35), "การที่ได้ต้องแสดงว่างานดี", + new String[] { "การ", "ที่", "ได้", "ต้อง", "แสดง", "ว่า", "งาน", "ดี" }, + new int[] { 0, 3, 6, 9, 13, 17, 20, 23 }, + new int[] { 3, 6, 9, 13, 17, 20, 23, 25 }); + } + + public void testTokenType() throws Exception { assertAnalyzesTo(new ThaiAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET), "การที่ได้ต้องแสดงว่างานดี ๑๒๓", new String[] { "การ", "ที่", "ได้", "ต้อง", "แสดง", "ว่า", "งาน", "ดี", "๑๒๓" }, new String[] { "<SOUTHEAST_ASIAN>", "<SOUTHEAST_ASIAN>", @@ -77,69 +77,69 @@ "<SOUTHEAST_ASIAN>", "<SOUTHEAST_ASIAN>", "<SOUTHEAST_ASIAN>", "<SOUTHEAST_ASIAN>", "<NUM>" }); - } + } - /** - * Thai numeric tokens were typed as <ALPHANUM> instead of <NUM>. - * @deprecated (3.1) testing backwards behavior - */ - @Deprecated - public void testBuggyTokenType30() throws Exception { - assertAnalyzesTo(new ThaiAnalyzer(Version.LUCENE_30), "การที่ได้ต้องแสดงว่างานดี ๑๒๓", + /** + * Thai numeric tokens were typed as <ALPHANUM> instead of <NUM>. 
+ * @deprecated (3.1) testing backwards behavior + */ + @Deprecated + public void testBuggyTokenType30() throws Exception { + assertAnalyzesTo(new ThaiAnalyzer(Version.LUCENE_30), "การที่ได้ต้องแสดงว่างานดี ๑๒๓", new String[] { "การ", "ที่", "ได้", "ต้อง", "แสดง", "ว่า", "งาน", "ดี", "๑๒๓" }, new String[] { "<ALPHANUM>", "<ALPHANUM>", "<ALPHANUM>", "<ALPHANUM>", "<ALPHANUM>", "<ALPHANUM>", "<ALPHANUM>", "<ALPHANUM>", "<ALPHANUM>" }); - } - - /** @deprecated (3.1) testing backwards behavior */ - @Deprecated + } + + /** @deprecated (3.1) testing backwards behavior */ + @Deprecated public void testAnalyzer30() throws Exception { ThaiAnalyzer analyzer = new ThaiAnalyzer(Version.LUCENE_30); - - assertAnalyzesTo(analyzer, "", new String[] {}); - assertAnalyzesTo( - analyzer, - "การที่ได้ต้องแสดงว่างานดี", - new String[] { "การ", "ที่", "ได้", "ต้อง", "แสดง", "ว่า", "งาน", "ดี"}); + assertAnalyzesTo(analyzer, "", new String[] {}); - assertAnalyzesTo( - analyzer, - "บริษัทชื่อ XY&Z - คุยกับ xyz@demo.com", - new String[] { "บริษัท", "ชื่อ", "xy&z", "คุย", "กับ", "xyz@demo.com" }); + assertAnalyzesTo( + analyzer, + "การที่ได้ต้องแสดงว่างานดี", + new String[] { "การ", "ที่", "ได้", "ต้อง", "แสดง", "ว่า", "งาน", "ดี"}); + assertAnalyzesTo( + analyzer, + "บริษัทชื่อ XY&Z - คุยกับ xyz@demo.com", + new String[] { "บริษัท", "ชื่อ", "xy&z", "คุย", "กับ", "xyz@demo.com" }); + // English stop words - assertAnalyzesTo( - analyzer, - "ประโยคว่า The quick brown fox jumped over the lazy dogs", - new String[] { "ประโยค", "ว่า", "quick", "brown", "fox", "jumped", "over", "lazy", "dogs" }); - } - - /* - * Test that position increments are adjusted correctly for stopwords. - */ - // note this test uses stopfilter's stopset - public void testPositionIncrements() throws Exception { - final ThaiAnalyzer analyzer = new ThaiAnalyzer(TEST_VERSION_CURRENT, StopAnalyzer.ENGLISH_STOP_WORDS_SET); + assertAnalyzesTo( + analyzer, + "ประโยคว่า The quick brown fox jumped over the lazy dogs", + new String[] { "ประโยค", "ว่า", "quick", "brown", "fox", "jumped", "over", "lazy", "dogs" }); + } + + /* + * Test that position increments are adjusted correctly for stopwords. 
+ */ + // note this test uses stopfilter's stopset + public void testPositionIncrements() throws Exception { + final ThaiAnalyzer analyzer = new ThaiAnalyzer(TEST_VERSION_CURRENT, StopAnalyzer.ENGLISH_STOP_WORDS_SET); assertAnalyzesTo(analyzer, "การที่ได้ต้อง the แสดงว่างานดี", new String[] { "การ", "ที่", "ได้", "ต้อง", "แสดง", "ว่า", "งาน", "ดี" }, new int[] { 0, 3, 6, 9, 18, 22, 25, 28 }, new int[] { 3, 6, 9, 13, 22, 25, 28, 30 }, new int[] { 1, 1, 1, 1, 2, 1, 1, 1 }); - - // case that a stopword is adjacent to thai text, with no whitespace + + // case that a stopword is adjacent to thai text, with no whitespace assertAnalyzesTo(analyzer, "การที่ได้ต้องthe แสดงว่างานดี", new String[] { "การ", "ที่", "ได้", "ต้อง", "แสดง", "ว่า", "งาน", "ดี" }, new int[] { 0, 3, 6, 9, 17, 21, 24, 27 }, new int[] { 3, 6, 9, 13, 21, 24, 27, 29 }, new int[] { 1, 1, 1, 1, 2, 1, 1, 1 }); - } - - public void testReusableTokenStream() throws Exception { - ThaiAnalyzer analyzer = new ThaiAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET); - assertAnalyzesToReuse(analyzer, "", new String[] {}); + } + public void testReusableTokenStream() throws Exception { + ThaiAnalyzer analyzer = new ThaiAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET); + assertAnalyzesToReuse(analyzer, "", new String[] {}); + assertAnalyzesToReuse( analyzer, "การที่ได้ต้องแสดงว่างานดี", @@ -149,25 +149,25 @@ analyzer, "บริษัทชื่อ XY&Z - คุยกับ xyz@demo.com", new String[] { "บริษัท", "ชื่อ", "xy", "z", "คุย", "กับ", "xyz", "demo.com" }); - } - - /** @deprecated (3.1) for version back compat */ - @Deprecated - public void testReusableTokenStream30() throws Exception { - ThaiAnalyzer analyzer = new ThaiAnalyzer(Version.LUCENE_30); - assertAnalyzesToReuse(analyzer, "", new String[] {}); + } - assertAnalyzesToReuse( + /** @deprecated (3.1) for version back compat */ + @Deprecated + public void testReusableTokenStream30() throws Exception { + ThaiAnalyzer analyzer = new ThaiAnalyzer(Version.LUCENE_30); + assertAnalyzesToReuse(analyzer, "", new String[] {}); + + assertAnalyzesToReuse( analyzer, "การที่ได้ต้องแสดงว่างานดี", new String[] { "การ", "ที่", "ได้", "ต้อง", "แสดง", "ว่า", "งาน", "ดี"}); - assertAnalyzesToReuse( + assertAnalyzesToReuse( analyzer, "บริษัทชื่อ XY&Z - คุยกับ xyz@demo.com", new String[] { "บริษัท", "ชื่อ", "xy&z", "คุย", "กับ", "xyz@demo.com" }); } - + /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { checkRandomData(random(), new ThaiAnalyzer(TEST_VERSION_CURRENT), 1000*RANDOM_MULTIPLIER); Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternTokenizer.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternTokenizer.java (revision 1381159) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternTokenizer.java (working copy) @@ -37,7 +37,7 @@ public class TestPatternTokenizer extends BaseTokenStreamTestCase { - public void testSplitting() throws Exception + public void testSplitting() throws Exception { String qpattern = "\\'([^\\']+)\\'"; // get stuff between "'" String[][] tests = { @@ -71,8 +71,8 @@ } }*/ } - } - + } + public void testOffsetCorrection() throws Exception { final String INPUT = "Günther Günther is here"; Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/el/GreekAnalyzerTest.java =================================================================== --- 
lucene/analysis/common/src/test/org/apache/lucene/analysis/el/GreekAnalyzerTest.java (revision 1381159) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/el/GreekAnalyzerTest.java (working copy) @@ -48,29 +48,29 @@ new String[] { "προυποθεσ", "αψογ", "μεστ", "αλλ" }); } - /** - * Test the analysis of various greek strings. - * - * @throws Exception in case an error occurs - * @deprecated (3.1) Remove this test when support for 3.0 is no longer needed - */ + /** + * Test the analysis of various greek strings. + * + * @throws Exception in case an error occurs + * @deprecated (3.1) Remove this test when support for 3.0 is no longer needed + */ @Deprecated - public void testAnalyzerBWCompat() throws Exception { - Analyzer a = new GreekAnalyzer(Version.LUCENE_30); - // Verify the correct analysis of capitals and small accented letters - assertAnalyzesTo(a, "Μία εξαιρετικά καλή και πλούσια σειρά χαρακτήρων της Ελληνικής γλώσσας", - new String[] { "μια", "εξαιρετικα", "καλη", "πλουσια", "σειρα", "χαρακτηρων", - "ελληνικησ", "γλωσσασ" }); - // Verify the correct analysis of small letters with diaeresis and the elimination - // of punctuation marks - assertAnalyzesTo(a, "Προϊόντα (και) [πολλαπλές] - ΑΝΑΓΚΕΣ", - new String[] { "προιοντα", "πολλαπλεσ", "αναγκεσ" }); - // Verify the correct analysis of capital accented letters and capital letters with diaeresis, - // as well as the elimination of stop words - assertAnalyzesTo(a, "ΠΡΟΫΠΟΘΕΣΕΙΣ Άψογος, ο μεστός και οι άλλοι", - new String[] { "προυποθεσεισ", "αψογοσ", "μεστοσ", "αλλοι" }); - } - + public void testAnalyzerBWCompat() throws Exception { + Analyzer a = new GreekAnalyzer(Version.LUCENE_30); + // Verify the correct analysis of capitals and small accented letters + assertAnalyzesTo(a, "Μία εξαιρετικά καλή και πλούσια σειρά χαρακτήρων της Ελληνικής γλώσσας", + new String[] { "μια", "εξαιρετικα", "καλη", "πλουσια", "σειρα", "χαρακτηρων", + "ελληνικησ", "γλωσσασ" }); + // Verify the correct analysis of small letters with diaeresis and the elimination + // of punctuation marks + assertAnalyzesTo(a, "Προϊόντα (και) [πολλαπλές] - ΑΝΑΓΚΕΣ", + new String[] { "προιοντα", "πολλαπλεσ", "αναγκεσ" }); + // Verify the correct analysis of capital accented letters and capital letters with diaeresis, + // as well as the elimination of stop words + assertAnalyzesTo(a, "ΠΡΟΫΠΟΘΕΣΕΙΣ Άψογος, ο μεστός και οι άλλοι", + new String[] { "προυποθεσεισ", "αψογοσ", "μεστοσ", "αλλοι" }); + } + public void testReusableTokenStream() throws Exception { Analyzer a = new GreekAnalyzer(TEST_VERSION_CURRENT); // Verify the correct analysis of capitals and small accented letters, and Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymMap.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymMap.java (revision 1381159) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymMap.java (working copy) @@ -49,7 +49,7 @@ } public void testReadMappingRules() throws Exception { - SlowSynonymMap synMap; + SlowSynonymMap synMap; // (a)->[b] List rules = new ArrayList(); @@ -244,15 +244,15 @@ } public void testBigramTokenizer() throws Exception { - SlowSynonymMap synMap; - - // prepare bi-gram tokenizer factory - TokenizerFactory tf = new NGramTokenizerFactory(); - Map args = new HashMap(); - args.put("minGramSize","2"); - args.put("maxGramSize","2"); - tf.init( args ); + SlowSynonymMap synMap; + // prepare bi-gram 
tokenizer factory + TokenizerFactory tf = new NGramTokenizerFactory(); + Map args = new HashMap(); + args.put("minGramSize","2"); + args.put("maxGramSize","2"); + tf.init( args ); + // (ab)->(bc)->(cd)->[ef][fg][gh] List rules = new ArrayList(); rules.add( "abcd=>efgh" ); Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestWordlistLoader.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestWordlistLoader.java (revision 1381159) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestWordlistLoader.java (working copy) @@ -46,8 +46,8 @@ private void checkSet(CharArraySet wordset) { assertEquals(3, wordset.size()); - assertTrue(wordset.contains("ONE")); // case is not modified - assertTrue(wordset.contains("two")); // surrounding whitespace is removed + assertTrue(wordset.contains("ONE")); // case is not modified + assertTrue(wordset.contains("two")); // surrounding whitespace is removed assertTrue(wordset.contains("three")); assertFalse(wordset.contains("four")); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java (revision 1381159) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java (working copy) @@ -30967,7 +30967,7 @@ } } - // numRead < 0 + // numRead < 0 return true; } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/DictionaryCompoundWordTokenFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/DictionaryCompoundWordTokenFilterFactory.java (revision 1381159) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/DictionaryCompoundWordTokenFilterFactory.java (working copy) @@ -30,7 +30,7 @@ * <analyzer> * <tokenizer class="solr.WhitespaceTokenizerFactory"/> * <filter class="solr.DictionaryCompoundWordTokenFilterFactory" dictionary="dictionary.txt" - * minWordSize="5" minSubwordSize="2" maxSubwordSize="15" onlyLongestMatch="true"/> + * minWordSize="5" minSubwordSize="2" maxSubwordSize="15" onlyLongestMatch="true"/> * </analyzer> * </fieldType> * Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java (revision 1381159) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java (working copy) @@ -50,7 +50,7 @@ * <analyzer> * <tokenizer class="solr.WhitespaceTokenizerFactory"/> * <filter class="solr.HyphenationCompoundWordTokenFilterFactory" hyphenator="hyphenator.xml" encoding="UTF-8" - * dictionary="dictionary.txt" minWordSize="5" minSubwordSize="2" maxSubwordSize="15" onlyLongestMatch="false"/> + * dictionary="dictionary.txt" minWordSize="5" minSubwordSize="2" maxSubwordSize="15" onlyLongestMatch="false"/> * </analyzer> * </fieldType> * Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.java =================================================================== --- 
lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.java (revision 1381159) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.java (working copy) @@ -936,7 +936,7 @@ } } - // numRead < 0 + // numRead < 0 return true; } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/std31/StandardTokenizerImpl31.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/std31/StandardTokenizerImpl31.java (revision 1381159) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/std31/StandardTokenizerImpl31.java (working copy) @@ -810,7 +810,7 @@ } } - // numRead < 0 + // numRead < 0 return true; } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/std31/UAX29URLEmailTokenizerImpl31.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/std31/UAX29URLEmailTokenizerImpl31.java (revision 1381159) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/std31/UAX29URLEmailTokenizerImpl31.java (working copy) @@ -3385,7 +3385,7 @@ } } - // numRead < 0 + // numRead < 0 return true; } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizerImpl.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizerImpl.java (revision 1381159) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizerImpl.java (working copy) @@ -4126,7 +4126,7 @@ } } - // numRead < 0 + // numRead < 0 return true; } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/std34/StandardTokenizerImpl34.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/std34/StandardTokenizerImpl34.java (revision 1381159) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/std34/StandardTokenizerImpl34.java (working copy) @@ -827,7 +827,7 @@ } } - // numRead < 0 + // numRead < 0 return true; } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/std34/UAX29URLEmailTokenizerImpl34.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/std34/UAX29URLEmailTokenizerImpl34.java (revision 1381159) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/std34/UAX29URLEmailTokenizerImpl34.java (working copy) @@ -3473,7 +3473,7 @@ } } - // numRead < 0 + // numRead < 0 return true; } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/std36/UAX29URLEmailTokenizerImpl36.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/std36/UAX29URLEmailTokenizerImpl36.java (revision 1381159) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/std36/UAX29URLEmailTokenizerImpl36.java (working copy) @@ -3909,7 +3909,7 @@ } } - // numRead < 0 + // numRead < 0 return true; } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/ClassicTokenizerImpl.java =================================================================== --- 
lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/ClassicTokenizerImpl.java (revision 1381159) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/ClassicTokenizerImpl.java (working copy) @@ -453,7 +453,7 @@ } } - // numRead < 0 + // numRead < 0 return true; } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/CapitalizationFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/CapitalizationFilterFactory.java (revision 1381159) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/CapitalizationFilterFactory.java (working copy) @@ -49,8 +49,8 @@ * <analyzer> * <tokenizer class="solr.WhitespaceTokenizerFactory"/> * <filter class="solr.CapitalizationFilterFactory" onlyFirstWord="true" - * keep="java solr lucene" keepIgnoreCase="false" - * okPrefix="McK McD McA"/> + * keep="java solr lucene" keepIgnoreCase="false" + * okPrefix="McK McD McA"/> * </analyzer> * </fieldType> * Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/HyphenatedWordsFilter.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/HyphenatedWordsFilter.java (revision 1381159) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/HyphenatedWordsFilter.java (working copy) @@ -31,8 +31,8 @@ * Example field definition in schema.xml: *

  * <fieldtype name="text" class="solr.TextField" positionIncrementGap="100">
- * 	<analyzer type="index">
- * 		<tokenizer class="solr.WhitespaceTokenizerFactory"/>
+ *  <analyzer type="index">
+ *    <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *      <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
  *      <filter class="solr.StopFilterFactory" ignoreCase="true"/>
  *      <filter class="solr.HyphenatedWordsFilterFactory"/>
Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PatternAnalyzer.java
===================================================================
--- lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PatternAnalyzer.java	(revision 1381159)
+++ lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PatternAnalyzer.java	(working copy)
@@ -366,7 +366,7 @@
     public final void end() {
       // set final offset
       final int finalOffset = correctOffset(str.length());
-    	this.offsetAtt.setOffset(finalOffset, finalOffset);
+      this.offsetAtt.setOffset(finalOffset, finalOffset);
     }
 
     @Override
Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/HyphenatedWordsFilterFactory.java
===================================================================
--- lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/HyphenatedWordsFilterFactory.java	(revision 1381159)
+++ lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/HyphenatedWordsFilterFactory.java	(working copy)
@@ -33,7 +33,7 @@
  *
  */
 public class HyphenatedWordsFilterFactory extends TokenFilterFactory {
-	public HyphenatedWordsFilter create(TokenStream input) {
-		return new HyphenatedWordsFilter(input);
-	}
+  public HyphenatedWordsFilter create(TokenStream input) {
+    return new HyphenatedWordsFilter(input);
+  }
 }
Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java
===================================================================
--- lucene/analysis/common/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java	(revision 1381159)
+++ lucene/analysis/common/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java	(working copy)
@@ -50,7 +50,7 @@
   /** File containing default Brazilian Portuguese stopwords. */
   public final static String DEFAULT_STOPWORD_FILE = "stopwords.txt";
   
-	/**
+  /**
    * Returns an unmodifiable instance of the default stop-words set.
    * @return an unmodifiable instance of the default stop-words set.
    */
@@ -74,19 +74,19 @@
   }
 
 
-	/**
-	 * Contains words that should be indexed but not stemmed.
-	 */
-	private CharArraySet excltable = CharArraySet.EMPTY_SET;
-	
-	/**
-	 * Builds an analyzer with the default stop words ({@link #getDefaultStopSet()}).
-	 */
-	public BrazilianAnalyzer(Version matchVersion) {
+  /**
+   * Contains words that should be indexed but not stemmed.
+   */
+  private CharArraySet excltable = CharArraySet.EMPTY_SET;
+
+  /**
+   * Builds an analyzer with the default stop words ({@link #getDefaultStopSet()}).
+   */
+  public BrazilianAnalyzer(Version matchVersion) {
     this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET);
-	}
-	
-	/**
+  }
+
+  /**
    * Builds an analyzer with the given stop words
    * 
    * @param matchVersion
Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/br/BrazilianStemmer.java
===================================================================
--- lucene/analysis/common/src/java/org/apache/lucene/analysis/br/BrazilianStemmer.java	(revision 1381159)
+++ lucene/analysis/common/src/java/org/apache/lucene/analysis/br/BrazilianStemmer.java	(working copy)
@@ -25,37 +25,37 @@
 public class BrazilianStemmer {
   private static final Locale locale = new Locale("pt", "BR");
 
-	/**
-	 * Changed term
-	 */
-	private   String TERM ;
-	private   String CT ;
-	private   String R1 ;
-	private   String R2 ;
-	private   String RV ;
+  /**
+   * Changed term
+   */
+  private   String TERM ;
+  private   String CT ;
+  private   String R1 ;
+  private   String R2 ;
+  private   String RV ;
 
 
-	public BrazilianStemmer() {
-	}
+  public BrazilianStemmer() {
+  }
 
-	/**
-	 * Stems the given term to an unique discriminator.
-	 *
-	 * @param term  The term that should be stemmed.
-	 * @return      Discriminator for term
-	 */
-	protected String stem( String term ) {
+  /**
+   * Stems the given term to an unique discriminator.
+   *
+   * @param term  The term that should be stemmed.
+   * @return      Discriminator for term
+   */
+  protected String stem( String term ) {
     boolean altered = false ; // altered the term
 
     // creates CT
     createCT(term) ;
 
-		if ( !isIndexable( CT ) ) {
-			return null;
-		}
-		if ( !isStemmable( CT ) ) {
-			return CT ;
-		}
+    if ( !isIndexable( CT ) ) {
+      return null;
+    }
+    if ( !isStemmable( CT ) ) {
+      return CT ;
+    }
 
     R1 = getR1(CT) ;
     R2 = getR1(R1) ;
@@ -76,38 +76,38 @@
     step5() ;
 
     return CT ;
-	}
+  }
 
-	/**
-	 * Checks a term if it can be processed correctly.
-	 *
-	 * @return  true if, and only if, the given term consists in letters.
-	 */
-	private boolean isStemmable( String term ) {
-		for ( int c = 0; c < term.length(); c++ ) {
-			// Discard terms that contain non-letter characters.
-			if ( !Character.isLetter(term.charAt(c))) {
-				return false;
-			}
-		}
-		return true;
-	}
+  /**
+   * Checks a term if it can be processed correctly.
+   *
+   * @return  true if, and only if, the given term consists in letters.
+   */
+  private boolean isStemmable( String term ) {
+    for ( int c = 0; c < term.length(); c++ ) {
+      // Discard terms that contain non-letter characters.
+      if ( !Character.isLetter(term.charAt(c))) {
+        return false;
+      }
+    }
+    return true;
+  }
 
-	/**
-	 * Checks a term if it can be processed indexed.
-	 *
-	 * @return  true if it can be indexed
-	 */
-	private boolean isIndexable( String term ) {
-		return (term.length() < 30) && (term.length() > 2) ;
-	}
+  /**
+   * Checks a term if it can be processed indexed.
+   *
+   * @return  true if it can be indexed
+   */
+  private boolean isIndexable( String term ) {
+    return (term.length() < 30) && (term.length() > 2) ;
+  }
 
-	/**
-	 * See if string is 'a','e','i','o','u'
+  /**
+   * See if string is 'a','e','i','o','u'
    *
    * @return true if is vowel
-	 */
-	private boolean isVowel( char value ) {
+   */
+  private boolean isVowel( char value ) {
     return (value == 'a') ||
            (value == 'e') ||
            (value == 'i') ||
@@ -115,16 +115,16 @@
            (value == 'u') ;
   }
 
-	/**
-	 * Gets R1
+  /**
+   * Gets R1
    *
    * R1 - is the region after the first non-vowel following a vowel,
    *      or is the null region at the end of the word if there is
    *      no such non-vowel.
    *
    * @return null or a string representing R1
-	 */
-	private String getR1( String value ) {
+   */
+  private String getR1( String value ) {
     int     i;
     int     j;
 
@@ -159,8 +159,8 @@
     return value.substring(j+1) ;
   }
 
-	/**
-	 * Gets RV
+  /**
+   * Gets RV
    *
    * RV - IF the second letter is a consonant, RV is the region after
    *      the next following vowel,
@@ -175,8 +175,8 @@
    *      found.
    *
    * @return null or a string representing RV
-	 */
-	private String getRV( String value ) {
+   */
+  private String getRV( String value ) {
     int     i;
     int     j;
 
@@ -229,15 +229,15 @@
     return null ;
   }
 
-	/**
+  /**
    * 1) Turn to lowercase
    * 2) Remove accents
    * 3) ã -> a ; õ -> o
    * 4) ç -> c
    *
    * @return null or a string transformed
-	 */
-	private String changeTerm( String value ) {
+   */
+  private String changeTerm( String value ) {
     int     j;
     String  r = "" ;
 
@@ -282,12 +282,12 @@
     return r ;
   }
 
-	/**
+  /**
    * Check if a string ends with a suffix
    *
    * @return true if the string ends with the specified suffix
-	 */
-	private boolean suffix( String value, String suffix ) {
+   */
+  private boolean suffix( String value, String suffix ) {
 
     // be-safe !!!
     if ((value == null) || (suffix == null)) {
@@ -301,12 +301,12 @@
     return value.substring(value.length()-suffix.length()).equals(suffix);
   }
 
-	/**
+  /**
    * Replace a string suffix by another
    *
    * @return the replaced String
-	 */
-	private String replaceSuffix( String value, String toReplace, String changeTo ) {
+   */
+  private String replaceSuffix( String value, String toReplace, String changeTo ) {
     String vvalue ;
 
     // be-safe !!!
@@ -325,12 +325,12 @@
     }
   }
 
-	/**
+  /**
    * Remove a string suffix
    *
    * @return the String without the suffix
-	 */
-	private String removeSuffix( String value, String toRemove ) {
+   */
+  private String removeSuffix( String value, String toRemove ) {
     // be-safe !!!
     if ((value == null) ||
         (toRemove == null) ||
@@ -341,12 +341,12 @@
     return value.substring(0,value.length()-toRemove.length()) ;
   }
 
-	/**
+  /**
    * See if a suffix is preceded by a String
    *
    * @return true if the suffix is preceded
-	 */
-	private boolean suffixPreceded( String value, String suffix, String preceded ) {
+   */
+  private boolean suffixPreceded( String value, String suffix, String preceded ) {
     // be-safe !!!
     if ((value == null) ||
         (suffix == null) ||
@@ -358,10 +358,10 @@
     return suffix(removeSuffix(value,suffix),preceded) ;
   }
 
-	/**
-	 * Creates CT (changed term) , substituting * 'ã' and 'õ' for 'a~' and 'o~'.
-	 */
-	private void createCT( String term ) {
+  /**
+   * Creates CT (changed term) , substituting * 'ã' and 'õ' for 'a~' and 'o~'.
+   */
+  private void createCT( String term ) {
     CT = changeTerm(term) ;
 
     if (CT.length() < 2) return ;
@@ -396,14 +396,14 @@
   }
 
 
-	/**
-	 * Standard suffix removal.
+  /**
+   * Standard suffix removal.
    * Search for the longest among the following suffixes, and perform
    * the following actions:
    *
    * @return false if no ending was removed
-	 */
-	private boolean step1() {
+   */
+  private boolean step1() {
     if (CT == null) return false ;
 
     // suffix length = 7
@@ -559,15 +559,15 @@
   }
 
 
-	/**
-	 * Verb suffixes.
+  /**
+   * Verb suffixes.
    *
    * Search for the longest among the following suffixes in RV,
    * and if found, delete.
    *
    * @return false if no ending was removed
-	*/
-	private boolean step2() {
+  */
+  private boolean step2() {
     if (RV == null) return false ;
 
     // suffix length = 7
@@ -941,11 +941,11 @@
     return false ;
   }
 
-	/**
-	 * Delete suffix 'i' if in RV and preceded by 'c'
+  /**
+   * Delete suffix 'i' if in RV and preceded by 'c'
    *
-	*/
-	private void step3() {
+  */
+  private void step3() {
     if (RV == null) return ;
 
     if (suffix(RV,"i") && suffixPreceded(RV,"i","c")) {
@@ -954,14 +954,14 @@
 
   }
 
-	/**
-	 * Residual suffix
+  /**
+   * Residual suffix
    *
    * If the word ends with one of the suffixes (os a i o á í ó)
    * in RV, delete it
    *
-	*/
-	private void step4() {
+  */
+  private void step4() {
     if (RV == null) return  ;
 
     if (suffix(RV,"os")) {
@@ -979,15 +979,15 @@
 
   }
 
-	/**
-	 * If the word ends with one of ( e é ê) in RV,delete it,
+  /**
+   * If the word ends with one of ( e é ê) in RV,delete it,
    * and if preceded by 'gu' (or 'ci') with the 'u' (or 'i') in RV,
    * delete the 'u' (or 'i')
    *
    * Or if the word ends ç remove the cedilha
    *
-	*/
-	private void step5() {
+  */
+  private void step5() {
     if (RV == null) return  ;
 
     if (suffix(RV,"e")) {
@@ -1007,18 +1007,18 @@
     }
   }
 
-	/**
-	 * For log and debug purpose
-	 *
-	 * @return  TERM, CT, RV, R1 and R2
-	 */
-	public String log() {
+  /**
+   * For log and debug purpose
+   *
+   * @return  TERM, CT, RV, R1 and R2
+   */
+  public String log() {
     return " (TERM = " + TERM + ")" +
            " (CT = " + CT +")" +
            " (RV = " + RV +")" +
            " (R1 = " + R1 +")" +
            " (R2 = " + R2 +")" ;
-	}
+  }
 
 }
 
Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java
===================================================================
--- lucene/analysis/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java	(revision 1381159)
+++ lucene/analysis/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java	(working copy)
@@ -211,6 +211,6 @@
       }
     }
     return allStopWords.toArray(new Term[allStopWords.size()]);
-	}
+  }
 
 }
Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchStemmer.java
===================================================================
--- lucene/analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchStemmer.java	(revision 1381159)
+++ lucene/analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchStemmer.java	(working copy)
@@ -45,42 +45,42 @@
    */
    private StringBuilder tb = new StringBuilder();
 
-	/**
-	 * Region R0 is equal to the whole buffer
-	 */
-	private String R0;
+  /**
+   * Region R0 is equal to the whole buffer
+   */
+  private String R0;
 
-	/**
-	 * Region RV
-	 * "If the word begins with two vowels, RV is the region after the third letter,
-	 * otherwise the region after the first vowel not at the beginning of the word,
-	 * or the end of the word if these positions cannot be found."
-	 */
+  /**
+   * Region RV
+   * "If the word begins with two vowels, RV is the region after the third letter,
+   * otherwise the region after the first vowel not at the beginning of the word,
+   * or the end of the word if these positions cannot be found."
+   */
     private String RV;
 
-	/**
-	 * Region R1
-	 * "R1 is the region after the first non-vowel following a vowel
-	 * or is the null region at the end of the word if there is no such non-vowel"
-	 */
+  /**
+   * Region R1
+   * "R1 is the region after the first non-vowel following a vowel
+   * or is the null region at the end of the word if there is no such non-vowel"
+   */
     private String R1;
 
-	/**
-	 * Region R2
-	 * "R2 is the region after the first non-vowel in R1 following a vowel
-	 * or is the null region at the end of the word if there is no such non-vowel"
-	 */
+  /**
+   * Region R2
+   * "R2 is the region after the first non-vowel in R1 following a vowel
+   * or is the null region at the end of the word if there is no such non-vowel"
+   */
     private String R2;
 
 
-	/**
-	 * Set to true if we need to perform step 2
-	 */
+  /**
+   * Set to true if we need to perform step 2
+   */
     private boolean suite;
 
-	/**
-	 * Set to true if the buffer was modified
-	 */
+  /**
+   * Set to true if the buffer was modified
+   */
     private boolean modified;
 
 
@@ -91,626 +91,626 @@
      * @return java.lang.String  Discriminator for term
      */
     protected String stem( String term ) {
-		if ( !isStemmable( term ) ) {
-			return term;
-		}
+    if ( !isStemmable( term ) ) {
+      return term;
+    }
 
-		// Use lowercase for medium stemming.
-		term = term.toLowerCase(locale);
+    // Use lowercase for medium stemming.
+    term = term.toLowerCase(locale);
 
-		// Reset the StringBuilder.
-		sb.delete( 0, sb.length() );
-		sb.insert( 0, term );
+    // Reset the StringBuilder.
+    sb.delete( 0, sb.length() );
+    sb.insert( 0, term );
 
-		// reset the booleans
-		modified = false;
-		suite = false;
+    // reset the booleans
+    modified = false;
+    suite = false;
 
-		sb = treatVowels( sb );
+    sb = treatVowels( sb );
 
-		setStrings();
+    setStrings();
 
-		step1();
+    step1();
 
-		if (!modified || suite)
-		{
-			if (RV != null)
-			{
-				suite = step2a();
-				if (!suite)
-					step2b();
-			}
-		}
+    if (!modified || suite)
+    {
+      if (RV != null)
+      {
+        suite = step2a();
+        if (!suite)
+          step2b();
+      }
+    }
 
-		if (modified || suite)
-			step3();
-		else
-			step4();
+    if (modified || suite)
+      step3();
+    else
+      step4();
 
-		step5();
+    step5();
 
-		step6();
+    step6();
 
-		return sb.toString();
+    return sb.toString();
     }
 
-	/**
-	 * Sets the search region Strings
- * it needs to be done each time the buffer was modified - */ - private void setStrings() { - // set the strings - R0 = sb.toString(); - RV = retrieveRV( sb ); - R1 = retrieveR( sb ); - if ( R1 != null ) - { - tb.delete( 0, tb.length() ); - tb.insert( 0, R1 ); - R2 = retrieveR( tb ); - } - else - R2 = null; - } + /** + * Sets the search region Strings
+ * it needs to be done each time the buffer was modified + */ + private void setStrings() { + // set the strings + R0 = sb.toString(); + RV = retrieveRV( sb ); + R1 = retrieveR( sb ); + if ( R1 != null ) + { + tb.delete( 0, tb.length() ); + tb.insert( 0, R1 ); + R2 = retrieveR( tb ); + } + else + R2 = null; + } - /** - * First step of the Porter Algorithm
- * refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation - */ - private void step1( ) { - String[] suffix = { "ances", "iqUes", "ismes", "ables", "istes", "ance", "iqUe", "isme", "able", "iste" }; - deleteFrom( R2, suffix ); + /** + * First step of the Porter Algorithm
+ * refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation + */ + private void step1( ) { + String[] suffix = { "ances", "iqUes", "ismes", "ables", "istes", "ance", "iqUe", "isme", "able", "iste" }; + deleteFrom( R2, suffix ); - replaceFrom( R2, new String[] { "logies", "logie" }, "log" ); - replaceFrom( R2, new String[] { "usions", "utions", "usion", "ution" }, "u" ); - replaceFrom( R2, new String[] { "ences", "ence" }, "ent" ); + replaceFrom( R2, new String[] { "logies", "logie" }, "log" ); + replaceFrom( R2, new String[] { "usions", "utions", "usion", "ution" }, "u" ); + replaceFrom( R2, new String[] { "ences", "ence" }, "ent" ); - String[] search = { "atrices", "ateurs", "ations", "atrice", "ateur", "ation"}; - deleteButSuffixFromElseReplace( R2, search, "ic", true, R0, "iqU" ); + String[] search = { "atrices", "ateurs", "ations", "atrice", "ateur", "ation"}; + deleteButSuffixFromElseReplace( R2, search, "ic", true, R0, "iqU" ); - deleteButSuffixFromElseReplace( R2, new String[] { "ements", "ement" }, "eus", false, R0, "eux" ); - deleteButSuffixFrom( R2, new String[] { "ements", "ement" }, "ativ", false ); - deleteButSuffixFrom( R2, new String[] { "ements", "ement" }, "iv", false ); - deleteButSuffixFrom( R2, new String[] { "ements", "ement" }, "abl", false ); - deleteButSuffixFrom( R2, new String[] { "ements", "ement" }, "iqU", false ); + deleteButSuffixFromElseReplace( R2, new String[] { "ements", "ement" }, "eus", false, R0, "eux" ); + deleteButSuffixFrom( R2, new String[] { "ements", "ement" }, "ativ", false ); + deleteButSuffixFrom( R2, new String[] { "ements", "ement" }, "iv", false ); + deleteButSuffixFrom( R2, new String[] { "ements", "ement" }, "abl", false ); + deleteButSuffixFrom( R2, new String[] { "ements", "ement" }, "iqU", false ); - deleteFromIfTestVowelBeforeIn( R1, new String[] { "issements", "issement" }, false, R0 ); - deleteFrom( RV, new String[] { "ements", "ement" } ); + deleteFromIfTestVowelBeforeIn( R1, new String[] { "issements", "issement" }, false, R0 ); + deleteFrom( RV, new String[] { "ements", "ement" } ); - deleteButSuffixFromElseReplace( R2, new String[] { "ités", "ité" }, "abil", false, R0, "abl" ); - deleteButSuffixFromElseReplace( R2, new String[] { "ités", "ité" }, "ic", false, R0, "iqU" ); - deleteButSuffixFrom( R2, new String[] { "ités", "ité" }, "iv", true ); + deleteButSuffixFromElseReplace( R2, new String[] { "ités", "ité" }, "abil", false, R0, "abl" ); + deleteButSuffixFromElseReplace( R2, new String[] { "ités", "ité" }, "ic", false, R0, "iqU" ); + deleteButSuffixFrom( R2, new String[] { "ités", "ité" }, "iv", true ); - String[] autre = { "ifs", "ives", "if", "ive" }; - deleteButSuffixFromElseReplace( R2, autre, "icat", false, R0, "iqU" ); - deleteButSuffixFromElseReplace( R2, autre, "at", true, R2, "iqU" ); + String[] autre = { "ifs", "ives", "if", "ive" }; + deleteButSuffixFromElseReplace( R2, autre, "icat", false, R0, "iqU" ); + deleteButSuffixFromElseReplace( R2, autre, "at", true, R2, "iqU" ); - replaceFrom( R0, new String[] { "eaux" }, "eau" ); + replaceFrom( R0, new String[] { "eaux" }, "eau" ); - replaceFrom( R1, new String[] { "aux" }, "al" ); + replaceFrom( R1, new String[] { "aux" }, "al" ); - deleteButSuffixFromElseReplace( R2, new String[] { "euses", "euse" }, "", true, R1, "eux" ); + deleteButSuffixFromElseReplace( R2, new String[] { "euses", "euse" }, "", true, R1, "eux" ); - deleteFrom( R2, new String[] { "eux" } ); + deleteFrom( R2, new String[] { "eux" } ); - // if one of the next steps is performed, 
we will need to perform step2a - boolean temp = false; - temp = replaceFrom( RV, new String[] { "amment" }, "ant" ); - if (temp == true) - suite = true; - temp = replaceFrom( RV, new String[] { "emment" }, "ent" ); - if (temp == true) - suite = true; - temp = deleteFromIfTestVowelBeforeIn( RV, new String[] { "ments", "ment" }, true, RV ); - if (temp == true) - suite = true; + // if one of the next steps is performed, we will need to perform step2a + boolean temp = false; + temp = replaceFrom( RV, new String[] { "amment" }, "ant" ); + if (temp == true) + suite = true; + temp = replaceFrom( RV, new String[] { "emment" }, "ent" ); + if (temp == true) + suite = true; + temp = deleteFromIfTestVowelBeforeIn( RV, new String[] { "ments", "ment" }, true, RV ); + if (temp == true) + suite = true; - } + } - /** - * Second step (A) of the Porter Algorithm
- * Will be performed if nothing changed from the first step - * or changed were done in the amment, emment, ments or ment suffixes
- * refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation - * - * @return boolean - true if something changed in the StringBuilder - */ - private boolean step2a() { - String[] search = { "îmes", "îtes", "iraIent", "irait", "irais", "irai", "iras", "ira", - "irent", "iriez", "irez", "irions", "irons", "iront", - "issaIent", "issais", "issantes", "issante", "issants", "issant", - "issait", "issais", "issions", "issons", "issiez", "issez", "issent", - "isses", "isse", "ir", "is", "ît", "it", "ies", "ie", "i" }; - return deleteFromIfTestVowelBeforeIn( RV, search, false, RV ); - } + /** + * Second step (A) of the Porter Algorithm
+ * Will be performed if nothing changed from the first step + * or changed were done in the amment, emment, ments or ment suffixes
+ * refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation + * + * @return boolean - true if something changed in the StringBuilder + */ + private boolean step2a() { + String[] search = { "îmes", "îtes", "iraIent", "irait", "irais", "irai", "iras", "ira", + "irent", "iriez", "irez", "irions", "irons", "iront", + "issaIent", "issais", "issantes", "issante", "issants", "issant", + "issait", "issais", "issions", "issons", "issiez", "issez", "issent", + "isses", "isse", "ir", "is", "ît", "it", "ies", "ie", "i" }; + return deleteFromIfTestVowelBeforeIn( RV, search, false, RV ); + } - /** - * Second step (B) of the Porter Algorithm
- * Will be performed if step 2 A was performed unsuccessfully
- * refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation - */ - private void step2b() { - String[] suffix = { "eraIent", "erais", "erait", "erai", "eras", "erions", "eriez", - "erons", "eront","erez", "èrent", "era", "ées", "iez", - "ée", "és", "er", "ez", "é" }; - deleteFrom( RV, suffix ); + /** + * Second step (B) of the Porter Algorithm
+ * Will be performed if step 2 A was performed unsuccessfully
+ * refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation + */ + private void step2b() { + String[] suffix = { "eraIent", "erais", "erait", "erai", "eras", "erions", "eriez", + "erons", "eront","erez", "èrent", "era", "ées", "iez", + "ée", "és", "er", "ez", "é" }; + deleteFrom( RV, suffix ); - String[] search = { "assions", "assiez", "assent", "asses", "asse", "aIent", - "antes", "aIent", "Aient", "ante", "âmes", "âtes", "ants", "ant", - "ait", "aît", "ais", "Ait", "Aît", "Ais", "ât", "as", "ai", "Ai", "a" }; - deleteButSuffixFrom( RV, search, "e", true ); + String[] search = { "assions", "assiez", "assent", "asses", "asse", "aIent", + "antes", "aIent", "Aient", "ante", "âmes", "âtes", "ants", "ant", + "ait", "aît", "ais", "Ait", "Aît", "Ais", "ât", "as", "ai", "Ai", "a" }; + deleteButSuffixFrom( RV, search, "e", true ); - deleteFrom( R2, new String[] { "ions" } ); - } + deleteFrom( R2, new String[] { "ions" } ); + } - /** - * Third step of the Porter Algorithm
- * refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation - */ - private void step3() { - if (sb.length()>0) - { - char ch = sb.charAt( sb.length()-1 ); - if (ch == 'Y') - { - sb.setCharAt( sb.length()-1, 'i' ); - setStrings(); - } - else if (ch == 'ç') - { - sb.setCharAt( sb.length()-1, 'c' ); - setStrings(); - } - } - } + /** + * Third step of the Porter Algorithm
+ * refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation + */ + private void step3() { + if (sb.length()>0) + { + char ch = sb.charAt( sb.length()-1 ); + if (ch == 'Y') + { + sb.setCharAt( sb.length()-1, 'i' ); + setStrings(); + } + else if (ch == 'ç') + { + sb.setCharAt( sb.length()-1, 'c' ); + setStrings(); + } + } + } - /** - * Fourth step of the Porter Algorithm
- * refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation - */ - private void step4() { - if (sb.length() > 1) - { - char ch = sb.charAt( sb.length()-1 ); - if (ch == 's') - { - char b = sb.charAt( sb.length()-2 ); - if (b != 'a' && b != 'i' && b != 'o' && b != 'u' && b != 'è' && b != 's') - { - sb.delete( sb.length() - 1, sb.length()); - setStrings(); - } - } - } - boolean found = deleteFromIfPrecededIn( R2, new String[] { "ion" }, RV, "s" ); - if (!found) - found = deleteFromIfPrecededIn( R2, new String[] { "ion" }, RV, "t" ); + /** + * Fourth step of the Porter Algorithm
+ * refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation + */ + private void step4() { + if (sb.length() > 1) + { + char ch = sb.charAt( sb.length()-1 ); + if (ch == 's') + { + char b = sb.charAt( sb.length()-2 ); + if (b != 'a' && b != 'i' && b != 'o' && b != 'u' && b != 'è' && b != 's') + { + sb.delete( sb.length() - 1, sb.length()); + setStrings(); + } + } + } + boolean found = deleteFromIfPrecededIn( R2, new String[] { "ion" }, RV, "s" ); + if (!found) + found = deleteFromIfPrecededIn( R2, new String[] { "ion" }, RV, "t" ); - replaceFrom( RV, new String[] { "Ière", "ière", "Ier", "ier" }, "i" ); - deleteFrom( RV, new String[] { "e" } ); - deleteFromIfPrecededIn( RV, new String[] { "ë" }, R0, "gu" ); - } + replaceFrom( RV, new String[] { "Ière", "ière", "Ier", "ier" }, "i" ); + deleteFrom( RV, new String[] { "e" } ); + deleteFromIfPrecededIn( RV, new String[] { "ë" }, R0, "gu" ); + } - /** - * Fifth step of the Porter Algorithm
- * refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation - */ - private void step5() { - if (R0 != null) - { - if (R0.endsWith("enn") || R0.endsWith("onn") || R0.endsWith("ett") || R0.endsWith("ell") || R0.endsWith("eill")) - { - sb.delete( sb.length() - 1, sb.length() ); - setStrings(); - } - } - } + /** + * Fifth step of the Porter Algorithm
+ * refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation + */ + private void step5() { + if (R0 != null) + { + if (R0.endsWith("enn") || R0.endsWith("onn") || R0.endsWith("ett") || R0.endsWith("ell") || R0.endsWith("eill")) + { + sb.delete( sb.length() - 1, sb.length() ); + setStrings(); + } + } + } - /** - * Sixth (and last!) step of the Porter Algorithm
- * refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation - */ - private void step6() { - if (R0!=null && R0.length()>0) - { - boolean seenVowel = false; - boolean seenConson = false; - int pos = -1; - for (int i = R0.length()-1; i > -1; i--) - { - char ch = R0.charAt(i); - if (isVowel(ch)) - { - if (!seenVowel) - { - if (ch == 'é' || ch == 'è') - { - pos = i; - break; - } - } - seenVowel = true; - } - else - { - if (seenVowel) - break; - else - seenConson = true; - } - } - if (pos > -1 && seenConson && !seenVowel) - sb.setCharAt(pos, 'e'); - } - } + /** + * Sixth (and last!) step of the Porter Algorithm
+ * refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation + */ + private void step6() { + if (R0!=null && R0.length()>0) + { + boolean seenVowel = false; + boolean seenConson = false; + int pos = -1; + for (int i = R0.length()-1; i > -1; i--) + { + char ch = R0.charAt(i); + if (isVowel(ch)) + { + if (!seenVowel) + { + if (ch == 'é' || ch == 'è') + { + pos = i; + break; + } + } + seenVowel = true; + } + else + { + if (seenVowel) + break; + else + seenConson = true; + } + } + if (pos > -1 && seenConson && !seenVowel) + sb.setCharAt(pos, 'e'); + } + } - /** - * Delete a suffix searched in zone "source" if zone "from" contains prefix + search string - * - * @param source java.lang.String - the primary source zone for search - * @param search java.lang.String[] - the strings to search for suppression - * @param from java.lang.String - the secondary source zone for search - * @param prefix java.lang.String - the prefix to add to the search string to test - * @return boolean - true if modified - */ - private boolean deleteFromIfPrecededIn( String source, String[] search, String from, String prefix ) { - boolean found = false; - if (source!=null ) - { - for (int i = 0; i < search.length; i++) { - if ( source.endsWith( search[i] )) - { - if (from!=null && from.endsWith( prefix + search[i] )) - { - sb.delete( sb.length() - search[i].length(), sb.length()); - found = true; - setStrings(); - break; - } - } - } - } - return found; - } + /** + * Delete a suffix searched in zone "source" if zone "from" contains prefix + search string + * + * @param source java.lang.String - the primary source zone for search + * @param search java.lang.String[] - the strings to search for suppression + * @param from java.lang.String - the secondary source zone for search + * @param prefix java.lang.String - the prefix to add to the search string to test + * @return boolean - true if modified + */ + private boolean deleteFromIfPrecededIn( String source, String[] search, String from, String prefix ) { + boolean found = false; + if (source!=null ) + { + for (int i = 0; i < search.length; i++) { + if ( source.endsWith( search[i] )) + { + if (from!=null && from.endsWith( prefix + search[i] )) + { + sb.delete( sb.length() - search[i].length(), sb.length()); + found = true; + setStrings(); + break; + } + } + } + } + return found; + } - /** - * Delete a suffix searched in zone "source" if the preceding letter is (or isn't) a vowel - * - * @param source java.lang.String - the primary source zone for search - * @param search java.lang.String[] - the strings to search for suppression - * @param vowel boolean - true if we need a vowel before the search string - * @param from java.lang.String - the secondary source zone for search (where vowel could be) - * @return boolean - true if modified - */ - private boolean deleteFromIfTestVowelBeforeIn( String source, String[] search, boolean vowel, String from ) { - boolean found = false; - if (source!=null && from!=null) - { - for (int i = 0; i < search.length; i++) { - if ( source.endsWith( search[i] )) - { - if ((search[i].length() + 1) <= from.length()) - { - boolean test = isVowel(sb.charAt(sb.length()-(search[i].length()+1))); - if (test == vowel) - { - sb.delete( sb.length() - search[i].length(), sb.length()); - modified = true; - found = true; - setStrings(); - break; - } - } - } - } - } - return found; - } + /** + * Delete a suffix searched in zone "source" if the preceding letter is (or isn't) a vowel + * + * @param source java.lang.String - the primary 
source zone for search + * @param search java.lang.String[] - the strings to search for suppression + * @param vowel boolean - true if we need a vowel before the search string + * @param from java.lang.String - the secondary source zone for search (where vowel could be) + * @return boolean - true if modified + */ + private boolean deleteFromIfTestVowelBeforeIn( String source, String[] search, boolean vowel, String from ) { + boolean found = false; + if (source!=null && from!=null) + { + for (int i = 0; i < search.length; i++) { + if ( source.endsWith( search[i] )) + { + if ((search[i].length() + 1) <= from.length()) + { + boolean test = isVowel(sb.charAt(sb.length()-(search[i].length()+1))); + if (test == vowel) + { + sb.delete( sb.length() - search[i].length(), sb.length()); + modified = true; + found = true; + setStrings(); + break; + } + } + } + } + } + return found; + } - /** - * Delete a suffix searched in zone "source" if preceded by the prefix - * - * @param source java.lang.String - the primary source zone for search - * @param search java.lang.String[] - the strings to search for suppression - * @param prefix java.lang.String - the prefix to add to the search string to test - * @param without boolean - true if it will be deleted even without prefix found - */ - private void deleteButSuffixFrom( String source, String[] search, String prefix, boolean without ) { - if (source!=null) - { - for (int i = 0; i < search.length; i++) { - if ( source.endsWith( prefix + search[i] )) - { - sb.delete( sb.length() - (prefix.length() + search[i].length()), sb.length() ); - modified = true; - setStrings(); - break; - } - else if ( without && source.endsWith( search[i] )) - { - sb.delete( sb.length() - search[i].length(), sb.length() ); - modified = true; - setStrings(); - break; - } - } - } - } + /** + * Delete a suffix searched in zone "source" if preceded by the prefix + * + * @param source java.lang.String - the primary source zone for search + * @param search java.lang.String[] - the strings to search for suppression + * @param prefix java.lang.String - the prefix to add to the search string to test + * @param without boolean - true if it will be deleted even without prefix found + */ + private void deleteButSuffixFrom( String source, String[] search, String prefix, boolean without ) { + if (source!=null) + { + for (int i = 0; i < search.length; i++) { + if ( source.endsWith( prefix + search[i] )) + { + sb.delete( sb.length() - (prefix.length() + search[i].length()), sb.length() ); + modified = true; + setStrings(); + break; + } + else if ( without && source.endsWith( search[i] )) + { + sb.delete( sb.length() - search[i].length(), sb.length() ); + modified = true; + setStrings(); + break; + } + } + } + } - /** - * Delete a suffix searched in zone "source" if preceded by prefix
- * or replace it with the replace string if preceded by the prefix in the zone "from"
- * or delete the suffix if specified - * - * @param source java.lang.String - the primary source zone for search - * @param search java.lang.String[] - the strings to search for suppression - * @param prefix java.lang.String - the prefix to add to the search string to test - * @param without boolean - true if it will be deleted even without prefix found - */ - private void deleteButSuffixFromElseReplace( String source, String[] search, String prefix, boolean without, String from, String replace ) { - if (source!=null) - { - for (int i = 0; i < search.length; i++) { - if ( source.endsWith( prefix + search[i] )) - { - sb.delete( sb.length() - (prefix.length() + search[i].length()), sb.length() ); - modified = true; - setStrings(); - break; - } - else if ( from!=null && from.endsWith( prefix + search[i] )) - { - sb.replace( sb.length() - (prefix.length() + search[i].length()), sb.length(), replace ); - modified = true; - setStrings(); - break; - } - else if ( without && source.endsWith( search[i] )) - { - sb.delete( sb.length() - search[i].length(), sb.length() ); - modified = true; - setStrings(); - break; - } - } - } - } + /** + * Delete a suffix searched in zone "source" if preceded by prefix
+ * or replace it with the replace string if preceded by the prefix in the zone "from"
+ * or delete the suffix if specified + * + * @param source java.lang.String - the primary source zone for search + * @param search java.lang.String[] - the strings to search for suppression + * @param prefix java.lang.String - the prefix to add to the search string to test + * @param without boolean - true if it will be deleted even without prefix found + */ + private void deleteButSuffixFromElseReplace( String source, String[] search, String prefix, boolean without, String from, String replace ) { + if (source!=null) + { + for (int i = 0; i < search.length; i++) { + if ( source.endsWith( prefix + search[i] )) + { + sb.delete( sb.length() - (prefix.length() + search[i].length()), sb.length() ); + modified = true; + setStrings(); + break; + } + else if ( from!=null && from.endsWith( prefix + search[i] )) + { + sb.replace( sb.length() - (prefix.length() + search[i].length()), sb.length(), replace ); + modified = true; + setStrings(); + break; + } + else if ( without && source.endsWith( search[i] )) + { + sb.delete( sb.length() - search[i].length(), sb.length() ); + modified = true; + setStrings(); + break; + } + } + } + } - /** - * Replace a search string with another within the source zone - * - * @param source java.lang.String - the source zone for search - * @param search java.lang.String[] - the strings to search for replacement - * @param replace java.lang.String - the replacement string - */ - private boolean replaceFrom( String source, String[] search, String replace ) { - boolean found = false; - if (source!=null) - { - for (int i = 0; i < search.length; i++) { - if ( source.endsWith( search[i] )) - { - sb.replace( sb.length() - search[i].length(), sb.length(), replace ); - modified = true; - found = true; - setStrings(); - break; - } - } - } - return found; - } + /** + * Replace a search string with another within the source zone + * + * @param source java.lang.String - the source zone for search + * @param search java.lang.String[] - the strings to search for replacement + * @param replace java.lang.String - the replacement string + */ + private boolean replaceFrom( String source, String[] search, String replace ) { + boolean found = false; + if (source!=null) + { + for (int i = 0; i < search.length; i++) { + if ( source.endsWith( search[i] )) + { + sb.replace( sb.length() - search[i].length(), sb.length(), replace ); + modified = true; + found = true; + setStrings(); + break; + } + } + } + return found; + } - /** - * Delete a search string within the source zone - * - * @param source the source zone for search - * @param suffix the strings to search for suppression - */ - private void deleteFrom(String source, String[] suffix ) { - if (source!=null) - { - for (int i = 0; i < suffix.length; i++) { - if (source.endsWith( suffix[i] )) - { - sb.delete( sb.length() - suffix[i].length(), sb.length()); - modified = true; - setStrings(); - break; - } - } - } - } + /** + * Delete a search string within the source zone + * + * @param source the source zone for search + * @param suffix the strings to search for suppression + */ + private void deleteFrom(String source, String[] suffix ) { + if (source!=null) + { + for (int i = 0; i < suffix.length; i++) { + if (source.endsWith( suffix[i] )) + { + sb.delete( sb.length() - suffix[i].length(), sb.length()); + modified = true; + setStrings(); + break; + } + } + } + } - /** - * Test if a char is a french vowel, including accentuated ones - * - * @param ch the char to test - * @return boolean - true if the char is a vowel - */ - private boolean 
isVowel(char ch) { - switch (ch) - { - case 'a': - case 'e': - case 'i': - case 'o': - case 'u': - case 'y': - case 'â': - case 'à': - case 'ë': - case 'é': - case 'ê': - case 'è': - case 'ï': - case 'î': - case 'ô': - case 'ü': - case 'ù': - case 'û': - return true; - default: - return false; - } - } + /** + * Test if a char is a french vowel, including accentuated ones + * + * @param ch the char to test + * @return boolean - true if the char is a vowel + */ + private boolean isVowel(char ch) { + switch (ch) + { + case 'a': + case 'e': + case 'i': + case 'o': + case 'u': + case 'y': + case 'â': + case 'à': + case 'ë': + case 'é': + case 'ê': + case 'è': + case 'ï': + case 'î': + case 'ô': + case 'ü': + case 'ù': + case 'û': + return true; + default: + return false; + } + } - /** - * Retrieve the "R zone" (1 or 2 depending on the buffer) and return the corresponding string
- * "R is the region after the first non-vowel following a vowel - * or is the null region at the end of the word if there is no such non-vowel"
- * @param buffer java.lang.StringBuilder - the in buffer - * @return java.lang.String - the resulting string - */ - private String retrieveR( StringBuilder buffer ) { - int len = buffer.length(); - int pos = -1; - for (int c = 0; c < len; c++) { - if (isVowel( buffer.charAt( c ))) - { - pos = c; - break; - } - } - if (pos > -1) - { - int consonne = -1; - for (int c = pos; c < len; c++) { - if (!isVowel(buffer.charAt( c ))) - { - consonne = c; - break; - } - } - if (consonne > -1 && (consonne+1) < len) - return buffer.substring( consonne+1, len ); - else - return null; - } - else - return null; - } + /** + * Retrieve the "R zone" (1 or 2 depending on the buffer) and return the corresponding string
+ * "R is the region after the first non-vowel following a vowel + * or is the null region at the end of the word if there is no such non-vowel"
+ * @param buffer java.lang.StringBuilder - the in buffer + * @return java.lang.String - the resulting string + */ + private String retrieveR( StringBuilder buffer ) { + int len = buffer.length(); + int pos = -1; + for (int c = 0; c < len; c++) { + if (isVowel( buffer.charAt( c ))) + { + pos = c; + break; + } + } + if (pos > -1) + { + int consonne = -1; + for (int c = pos; c < len; c++) { + if (!isVowel(buffer.charAt( c ))) + { + consonne = c; + break; + } + } + if (consonne > -1 && (consonne+1) < len) + return buffer.substring( consonne+1, len ); + else + return null; + } + else + return null; + } - /** - * Retrieve the "RV zone" from a buffer an return the corresponding string
- * "If the word begins with two vowels, RV is the region after the third letter, - * otherwise the region after the first vowel not at the beginning of the word, - * or the end of the word if these positions cannot be found."
- * @param buffer java.lang.StringBuilder - the in buffer - * @return java.lang.String - the resulting string - */ - private String retrieveRV( StringBuilder buffer ) { - int len = buffer.length(); - if ( buffer.length() > 3) - { - if ( isVowel(buffer.charAt( 0 )) && isVowel(buffer.charAt( 1 ))) { - return buffer.substring(3,len); - } - else - { - int pos = 0; - for (int c = 1; c < len; c++) { - if (isVowel( buffer.charAt( c ))) - { - pos = c; - break; - } - } - if ( pos+1 < len ) - return buffer.substring( pos+1, len ); - else - return null; - } - } - else - return null; - } + /** + * Retrieve the "RV zone" from a buffer an return the corresponding string
+ * "If the word begins with two vowels, RV is the region after the third letter, + * otherwise the region after the first vowel not at the beginning of the word, + * or the end of the word if these positions cannot be found."
+ * @param buffer java.lang.StringBuilder - the in buffer + * @return java.lang.String - the resulting string + */ + private String retrieveRV( StringBuilder buffer ) { + int len = buffer.length(); + if ( buffer.length() > 3) + { + if ( isVowel(buffer.charAt( 0 )) && isVowel(buffer.charAt( 1 ))) { + return buffer.substring(3,len); + } + else + { + int pos = 0; + for (int c = 1; c < len; c++) { + if (isVowel( buffer.charAt( c ))) + { + pos = c; + break; + } + } + if ( pos+1 < len ) + return buffer.substring( pos+1, len ); + else + return null; + } + } + else + return null; + } /** - * Turns u and i preceded AND followed by a vowel to UpperCase
- * Turns y preceded OR followed by a vowel to UpperCase
- * Turns u preceded by q to UpperCase
+ * Turns u and i preceded AND followed by a vowel to UpperCase
+ * Turns y preceded OR followed by a vowel to UpperCase
+ * Turns u preceded by q to UpperCase
* * @param buffer java.util.StringBuilder - the buffer to treat * @return java.util.StringBuilder - the treated buffer */ private StringBuilder treatVowels( StringBuilder buffer ) { - for ( int c = 0; c < buffer.length(); c++ ) { - char ch = buffer.charAt( c ); + for ( int c = 0; c < buffer.length(); c++ ) { + char ch = buffer.charAt( c ); - if (c == 0) // first char - { - if (buffer.length()>1) - { - if (ch == 'y' && isVowel(buffer.charAt( c + 1 ))) - buffer.setCharAt( c, 'Y' ); - } - } - else if (c == buffer.length()-1) // last char - { - if (ch == 'u' && buffer.charAt( c - 1 ) == 'q') - buffer.setCharAt( c, 'U' ); - if (ch == 'y' && isVowel(buffer.charAt( c - 1 ))) - buffer.setCharAt( c, 'Y' ); - } - else // other cases - { - if (ch == 'u') - { - if (buffer.charAt( c - 1) == 'q') - buffer.setCharAt( c, 'U' ); - else if (isVowel(buffer.charAt( c - 1 )) && isVowel(buffer.charAt( c + 1 ))) - buffer.setCharAt( c, 'U' ); - } - if (ch == 'i') - { - if (isVowel(buffer.charAt( c - 1 )) && isVowel(buffer.charAt( c + 1 ))) - buffer.setCharAt( c, 'I' ); - } - if (ch == 'y') - { - if (isVowel(buffer.charAt( c - 1 )) || isVowel(buffer.charAt( c + 1 ))) - buffer.setCharAt( c, 'Y' ); - } - } - } - - return buffer; + if (c == 0) // first char + { + if (buffer.length()>1) + { + if (ch == 'y' && isVowel(buffer.charAt( c + 1 ))) + buffer.setCharAt( c, 'Y' ); + } + } + else if (c == buffer.length()-1) // last char + { + if (ch == 'u' && buffer.charAt( c - 1 ) == 'q') + buffer.setCharAt( c, 'U' ); + if (ch == 'y' && isVowel(buffer.charAt( c - 1 ))) + buffer.setCharAt( c, 'Y' ); + } + else // other cases + { + if (ch == 'u') + { + if (buffer.charAt( c - 1) == 'q') + buffer.setCharAt( c, 'U' ); + else if (isVowel(buffer.charAt( c - 1 )) && isVowel(buffer.charAt( c + 1 ))) + buffer.setCharAt( c, 'U' ); + } + if (ch == 'i') + { + if (isVowel(buffer.charAt( c - 1 )) && isVowel(buffer.charAt( c + 1 ))) + buffer.setCharAt( c, 'I' ); + } + if (ch == 'y') + { + if (isVowel(buffer.charAt( c - 1 )) || isVowel(buffer.charAt( c + 1 ))) + buffer.setCharAt( c, 'Y' ); + } + } } + return buffer; + } + /** * Checks a term if it can be processed correctly. * * @return boolean - true if, and only if, the given term consists in letters. */ private boolean isStemmable( String term ) { - boolean upper = false; - int first = -1; - for ( int c = 0; c < term.length(); c++ ) { - // Discard terms that contain non-letter characters. - if ( !Character.isLetter( term.charAt( c ) ) ) { - return false; - } - // Discard terms that contain multiple uppercase letters. - if ( Character.isUpperCase( term.charAt( c ) ) ) { - if ( upper ) { - return false; - } - // First encountered uppercase letter, set flag and save - // position. - else { - first = c; - upper = true; - } - } - } - // Discard the term if it contains a single uppercase letter that - // is not starting the term. - if ( first > 0 ) { - return false; - } - return true; + boolean upper = false; + int first = -1; + for ( int c = 0; c < term.length(); c++ ) { + // Discard terms that contain non-letter characters. + if ( !Character.isLetter( term.charAt( c ) ) ) { + return false; + } + // Discard terms that contain multiple uppercase letters. + if ( Character.isUpperCase( term.charAt( c ) ) ) { + if ( upper ) { + return false; + } + // First encountered uppercase letter, set flag and save + // position. + else { + first = c; + upper = true; + } + } } + // Discard the term if it contains a single uppercase letter that + // is not starting the term. 
+ if ( first > 0 ) { + return false; + } + return true; + } } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchStemFilter.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchStemFilter.java (revision 1381159) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchStemFilter.java (working copy) @@ -45,46 +45,46 @@ @Deprecated public final class FrenchStemFilter extends TokenFilter { - /** - * The actual token in the input stream. - */ - private FrenchStemmer stemmer = new FrenchStemmer(); - - private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); + /** + * The stemmer used to stem tokens from the input stream. + */ + private FrenchStemmer stemmer = new FrenchStemmer(); + + private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); private final KeywordAttribute keywordAttr = addAttribute(KeywordAttribute.class); - public FrenchStemFilter( TokenStream in ) { + public FrenchStemFilter( TokenStream in ) { super(in); - } + } - /** - * @return Returns true for the next token in the stream, or false at EOS - */ - @Override - public boolean incrementToken() throws IOException { - if (input.incrementToken()) { - String term = termAtt.toString(); + /** + * @return true for the next token in the stream, or false at EOS + */ + @Override + public boolean incrementToken() throws IOException { + if (input.incrementToken()) { + String term = termAtt.toString(); - // Check the exclusion table - if (!keywordAttr.isKeyword()) { - String s = stemmer.stem( term ); - // If not stemmed, don't waste the time adjusting the token. - if ((s != null) && !s.equals( term ) ) - termAtt.setEmpty().append(s); - } - return true; - } else { - return false; - } - } - /** - * Set a alternative/custom {@link FrenchStemmer} for this filter. - */ - public void setStemmer( FrenchStemmer stemmer ) { - if ( stemmer != null ) { - this.stemmer = stemmer; - } - } + // Check the exclusion table + if (!keywordAttr.isKeyword()) { + String s = stemmer.stem( term ); + // If not stemmed, don't waste the time adjusting the token. + if ((s != null) && !s.equals( term ) ) + termAtt.setEmpty().append(s); + } + return true; + } else { + return false; + } + } + /** + * Set an alternative/custom {@link FrenchStemmer} for this filter. + */ + public void setStemmer( FrenchStemmer stemmer ) { + if ( stemmer != null ) { + this.stemmer = stemmer; + } + } } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilter.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilter.java (revision 1381159) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilter.java (working copy) @@ -395,7 +395,7 @@ exhausted = true; } return newTarget; - } + } /** *

Fills {@link #inputWindow} with input stream tokens, if available, shifting to the right if the window was previously full. Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/path/PathHierarchyTokenizerFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/path/PathHierarchyTokenizerFactory.java (revision 1381159) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/path/PathHierarchyTokenizerFactory.java (working copy) @@ -43,10 +43,10 @@ *

  * <fieldType name="descendent_path" class="solr.TextField">
  *   <analyzer type="index">
- * 	   <tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
+ *     <tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
  *   </analyzer>
  *   <analyzer type="query">
- * 	   <tokenizer class="solr.KeywordTokenizerFactory" />
+ *     <tokenizer class="solr.KeywordTokenizerFactory" />
  *   </analyzer>
  * </fieldType>
  * 
@@ -61,10 +61,10 @@ *
  * <fieldType name="descendent_path" class="solr.TextField">
  *   <analyzer type="index">
- * 	   <tokenizer class="solr.KeywordTokenizerFactory" />
+ *     <tokenizer class="solr.KeywordTokenizerFactory" />
  *   </analyzer>
  *   <analyzer type="query">
- * 	   <tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
+ *     <tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
  *   </analyzer>
  * </fieldType>
  * 
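The two analyzer configurations above are mirror images of each other: one side expands a stored path into all of its prefixes, while the other side emits the search input as a single keyword token. For reference, the path-expansion half can be exercised directly; a minimal sketch assuming the Lucene 4.x analysis API (the class name, sample path and printed output are illustrative, not part of this patch):

import java.io.StringReader;
import org.apache.lucene.analysis.path.PathHierarchyTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class PathHierarchyDemo {
  public static void main(String[] args) throws Exception {
    // Emits one token per path prefix: /usr, /usr/local, /usr/local/bin.
    // A query-time KeywordTokenizer emits the search path as a single token,
    // so a query for /usr/local matches every document indexed below it.
    PathHierarchyTokenizer tokenizer =
        new PathHierarchyTokenizer(new StringReader("/usr/local/bin"));
    CharTermAttribute term = tokenizer.addAttribute(CharTermAttribute.class);
    tokenizer.reset();
    while (tokenizer.incrementToken()) {
      System.out.println(term.toString());
    }
    tokenizer.end();
    tokenizer.close();
  }
}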
Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.java (revision 1381159) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.java (working copy) @@ -589,7 +589,7 @@ } } - // numRead < 0 + // numRead < 0 return true; } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/cz/CzechAnalyzer.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/cz/CzechAnalyzer.java (revision 1381159) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/cz/CzechAnalyzer.java (working copy) @@ -61,25 +61,25 @@ * * @return a set of default Czech-stopwords */ - public static final CharArraySet getDefaultStopSet(){ - return DefaultSetHolder.DEFAULT_SET; - } - - private static class DefaultSetHolder { - private static final CharArraySet DEFAULT_SET; - - static { - try { - DEFAULT_SET = WordlistLoader.getWordSet(IOUtils.getDecodingReader(CzechAnalyzer.class, - DEFAULT_STOPWORD_FILE, IOUtils.CHARSET_UTF_8), "#", Version.LUCENE_CURRENT); - } catch (IOException ex) { - // default set should always be present as it is part of the - // distribution (JAR) - throw new RuntimeException("Unable to load default stopword set"); - } - } - } + public static final CharArraySet getDefaultStopSet(){ + return DefaultSetHolder.DEFAULT_SET; + } + private static class DefaultSetHolder { + private static final CharArraySet DEFAULT_SET; + + static { + try { + DEFAULT_SET = WordlistLoader.getWordSet(IOUtils.getDecodingReader(CzechAnalyzer.class, + DEFAULT_STOPWORD_FILE, IOUtils.CHARSET_UTF_8), "#", Version.LUCENE_CURRENT); + } catch (IOException ex) { + // default set should always be present as it is part of the + // distribution (JAR) + throw new RuntimeException("Unable to load default stopword set"); + } + } + } + private final CharArraySet stemExclusionTable; @@ -89,9 +89,9 @@ * @param matchVersion Lucene version to match See * {@link above} */ - public CzechAnalyzer(Version matchVersion) { + public CzechAnalyzer(Version matchVersion) { this(matchVersion, DefaultSetHolder.DEFAULT_SET); - } + } /** * Builds an analyzer with the given stop words. Index: lucene/analysis/common/src/java/org/tartarus/snowball/Among.java =================================================================== --- lucene/analysis/common/src/java/org/tartarus/snowball/Among.java (revision 1381159) +++ lucene/analysis/common/src/java/org/tartarus/snowball/Among.java (working copy) @@ -43,25 +43,26 @@ * reflection calls (Lovins, etc) use EMPTY_ARGS/EMPTY_PARAMS */ public class Among { - private static final Class[] EMPTY_PARAMS = new Class[0]; - public Among (String s, int substring_i, int result, - String methodname, SnowballProgram methodobject) { - this.s_size = s.length(); - this.s = s.toCharArray(); - this.substring_i = substring_i; - this.result = result; - this.methodobject = methodobject; - if (methodname.length() == 0) { - this.method = null; - } else { - try { - this.method = methodobject.getClass(). 
- getDeclaredMethod(methodname, EMPTY_PARAMS); - } catch (NoSuchMethodException e) { - throw new RuntimeException(e); - } - } + private static final Class[] EMPTY_PARAMS = new Class[0]; + + public Among(String s, int substring_i, int result, + String methodname, SnowballProgram methodobject) { + this.s_size = s.length(); + this.s = s.toCharArray(); + this.substring_i = substring_i; + this.result = result; + this.methodobject = methodobject; + if (methodname.length() == 0) { + this.method = null; + } else { + try { + this.method = methodobject.getClass(). + getDeclaredMethod(methodname, EMPTY_PARAMS); + } catch (NoSuchMethodException e) { + throw new RuntimeException(e); + } } + } public final int s_size; /* search string */ public final char[] s; /* search string */ Index: lucene/analysis/common/src/java/org/tartarus/snowball/SnowballProgram.java =================================================================== --- lucene/analysis/common/src/java/org/tartarus/snowball/SnowballProgram.java (revision 1381159) +++ lucene/analysis/common/src/java/org/tartarus/snowball/SnowballProgram.java (working copy) @@ -51,8 +51,8 @@ protected SnowballProgram() { - current = new char[8]; - setCurrent(""); + current = new char[8]; + setCurrent(""); } public abstract boolean stem(); @@ -62,12 +62,12 @@ */ public void setCurrent(String value) { - current = value.toCharArray(); - cursor = 0; - limit = value.length(); - limit_backward = 0; - bra = cursor; - ket = limit; + current = value.toCharArray(); + cursor = 0; + limit = value.length(); + limit_backward = 0; + bra = cursor; + ket = limit; } /** @@ -130,354 +130,350 @@ protected void copy_from(SnowballProgram other) { - current = other.current; - cursor = other.cursor; - limit = other.limit; - limit_backward = other.limit_backward; - bra = other.bra; - ket = other.ket; + current = other.current; + cursor = other.cursor; + limit = other.limit; + limit_backward = other.limit_backward; + bra = other.bra; + ket = other.ket; } protected boolean in_grouping(char [] s, int min, int max) { - if (cursor >= limit) return false; - char ch = current[cursor]; - if (ch > max || ch < min) return false; - ch -= min; - if ((s[ch >> 3] & (0X1 << (ch & 0X7))) == 0) return false; - cursor++; - return true; + if (cursor >= limit) return false; + char ch = current[cursor]; + if (ch > max || ch < min) return false; + ch -= min; + if ((s[ch >> 3] & (0X1 << (ch & 0X7))) == 0) return false; + cursor++; + return true; } protected boolean in_grouping_b(char [] s, int min, int max) { - if (cursor <= limit_backward) return false; - char ch = current[cursor - 1]; - if (ch > max || ch < min) return false; - ch -= min; - if ((s[ch >> 3] & (0X1 << (ch & 0X7))) == 0) return false; - cursor--; - return true; + if (cursor <= limit_backward) return false; + char ch = current[cursor - 1]; + if (ch > max || ch < min) return false; + ch -= min; + if ((s[ch >> 3] & (0X1 << (ch & 0X7))) == 0) return false; + cursor--; + return true; } protected boolean out_grouping(char [] s, int min, int max) { - if (cursor >= limit) return false; - char ch = current[cursor]; - if (ch > max || ch < min) { - cursor++; - return true; - } - ch -= min; - if ((s[ch >> 3] & (0X1 << (ch & 0X7))) == 0) { - cursor ++; - return true; - } - return false; + if (cursor >= limit) return false; + char ch = current[cursor]; + if (ch > max || ch < min) { + cursor++; + return true; + } + ch -= min; + if ((s[ch >> 3] & (0X1 << (ch & 0X7))) == 0) { + cursor ++; + return true; + } + return false; } protected boolean 
out_grouping_b(char [] s, int min, int max) { - if (cursor <= limit_backward) return false; - char ch = current[cursor - 1]; - if (ch > max || ch < min) { - cursor--; - return true; - } - ch -= min; - if ((s[ch >> 3] & (0X1 << (ch & 0X7))) == 0) { - cursor--; - return true; - } - return false; + if (cursor <= limit_backward) return false; + char ch = current[cursor - 1]; + if (ch > max || ch < min) { + cursor--; + return true; + } + ch -= min; + if ((s[ch >> 3] & (0X1 << (ch & 0X7))) == 0) { + cursor--; + return true; + } + return false; } protected boolean in_range(int min, int max) { - if (cursor >= limit) return false; - char ch = current[cursor]; - if (ch > max || ch < min) return false; - cursor++; - return true; + if (cursor >= limit) return false; + char ch = current[cursor]; + if (ch > max || ch < min) return false; + cursor++; + return true; } protected boolean in_range_b(int min, int max) { - if (cursor <= limit_backward) return false; - char ch = current[cursor - 1]; - if (ch > max || ch < min) return false; - cursor--; - return true; + if (cursor <= limit_backward) return false; + char ch = current[cursor - 1]; + if (ch > max || ch < min) return false; + cursor--; + return true; } protected boolean out_range(int min, int max) { - if (cursor >= limit) return false; - char ch = current[cursor]; - if (!(ch > max || ch < min)) return false; - cursor++; - return true; + if (cursor >= limit) return false; + char ch = current[cursor]; + if (!(ch > max || ch < min)) return false; + cursor++; + return true; } protected boolean out_range_b(int min, int max) { - if (cursor <= limit_backward) return false; - char ch = current[cursor - 1]; - if(!(ch > max || ch < min)) return false; - cursor--; - return true; + if (cursor <= limit_backward) return false; + char ch = current[cursor - 1]; + if(!(ch > max || ch < min)) return false; + cursor--; + return true; } protected boolean eq_s(int s_size, CharSequence s) { - if (limit - cursor < s_size) return false; - int i; - for (i = 0; i != s_size; i++) { - if (current[cursor + i] != s.charAt(i)) return false; - } - cursor += s_size; - return true; + if (limit - cursor < s_size) return false; + int i; + for (i = 0; i != s_size; i++) { + if (current[cursor + i] != s.charAt(i)) return false; + } + cursor += s_size; + return true; } protected boolean eq_s_b(int s_size, CharSequence s) { - if (cursor - limit_backward < s_size) return false; - int i; - for (i = 0; i != s_size; i++) { - if (current[cursor - s_size + i] != s.charAt(i)) return false; - } - cursor -= s_size; - return true; + if (cursor - limit_backward < s_size) return false; + int i; + for (i = 0; i != s_size; i++) { + if (current[cursor - s_size + i] != s.charAt(i)) return false; + } + cursor -= s_size; + return true; } protected boolean eq_v(CharSequence s) { - return eq_s(s.length(), s); + return eq_s(s.length(), s); } protected boolean eq_v_b(CharSequence s) - { return eq_s_b(s.length(), s); + { + return eq_s_b(s.length(), s); } protected int find_among(Among v[], int v_size) { - int i = 0; - int j = v_size; + int i = 0; + int j = v_size; - int c = cursor; - int l = limit; + int c = cursor; + int l = limit; - int common_i = 0; - int common_j = 0; + int common_i = 0; + int common_j = 0; - boolean first_key_inspected = false; + boolean first_key_inspected = false; - while(true) { - int k = i + ((j - i) >> 1); - int diff = 0; - int common = common_i < common_j ? 
common_i : common_j; // smaller - Among w = v[k]; - int i2; - for (i2 = common; i2 < w.s_size; i2++) { - if (c + common == l) { - diff = -1; - break; - } - diff = current[c + common] - w.s[i2]; - if (diff != 0) break; - common++; - } - if (diff < 0) { - j = k; - common_j = common; - } else { - i = k; - common_i = common; - } - if (j - i <= 1) { - if (i > 0) break; // v->s has been inspected - if (j == i) break; // only one item in v + while (true) { + int k = i + ((j - i) >> 1); + int diff = 0; + int common = common_i < common_j ? common_i : common_j; // smaller + Among w = v[k]; + int i2; + for (i2 = common; i2 < w.s_size; i2++) { + if (c + common == l) { + diff = -1; + break; + } + diff = current[c + common] - w.s[i2]; + if (diff != 0) break; + common++; + } + if (diff < 0) { + j = k; + common_j = common; + } else { + i = k; + common_i = common; + } + if (j - i <= 1) { + if (i > 0) break; // v->s has been inspected + if (j == i) break; // only one item in v - // - but now we need to go round once more to get - // v->s inspected. This looks messy, but is actually - // the optimal approach. + // - but now we need to go round once more to get + // v->s inspected. This looks messy, but is actually + // the optimal approach. - if (first_key_inspected) break; - first_key_inspected = true; - } - } - while(true) { - Among w = v[i]; - if (common_i >= w.s_size) { - cursor = c + w.s_size; - if (w.method == null) return w.result; - boolean res; - try { - Object resobj = w.method.invoke(w.methodobject, EMPTY_ARGS); - res = resobj.toString().equals("true"); - } catch (InvocationTargetException e) { - res = false; - // FIXME - debug message - } catch (IllegalAccessException e) { - res = false; - // FIXME - debug message - } - cursor = c + w.s_size; - if (res) return w.result; - } - i = w.substring_i; - if (i < 0) return 0; - } + if (first_key_inspected) break; + first_key_inspected = true; + } + } + while (true) { + Among w = v[i]; + if (common_i >= w.s_size) { + cursor = c + w.s_size; + if (w.method == null) return w.result; + boolean res; + try { + Object resobj = w.method.invoke(w.methodobject, EMPTY_ARGS); + res = resobj.toString().equals("true"); + } catch (InvocationTargetException e) { + res = false; + // FIXME - debug message + } catch (IllegalAccessException e) { + res = false; + // FIXME - debug message + } + cursor = c + w.s_size; + if (res) return w.result; + } + i = w.substring_i; + if (i < 0) return 0; + } } - // find_among_b is for backwards processing. Same comments apply + // find_among_b is for backwards processing. Same comments apply protected int find_among_b(Among v[], int v_size) { - int i = 0; - int j = v_size; + int i = 0; + int j = v_size; - int c = cursor; - int lb = limit_backward; + int c = cursor; + int lb = limit_backward; - int common_i = 0; - int common_j = 0; + int common_i = 0; + int common_j = 0; - boolean first_key_inspected = false; + boolean first_key_inspected = false; - while(true) { - int k = i + ((j - i) >> 1); - int diff = 0; - int common = common_i < common_j ? 
common_i : common_j; - Among w = v[k]; - int i2; - for (i2 = w.s_size - 1 - common; i2 >= 0; i2--) { - if (c - common == lb) { - diff = -1; - break; - } - diff = current[c - 1 - common] - w.s[i2]; - if (diff != 0) break; - common++; - } - if (diff < 0) { - j = k; - common_j = common; - } else { - i = k; - common_i = common; - } - if (j - i <= 1) { - if (i > 0) break; - if (j == i) break; - if (first_key_inspected) break; - first_key_inspected = true; - } - } - while(true) { - Among w = v[i]; - if (common_i >= w.s_size) { - cursor = c - w.s_size; - if (w.method == null) return w.result; + while (true) { + int k = i + ((j - i) >> 1); + int diff = 0; + int common = common_i < common_j ? common_i : common_j; + Among w = v[k]; + int i2; + for (i2 = w.s_size - 1 - common; i2 >= 0; i2--) { + if (c - common == lb) { + diff = -1; + break; + } + diff = current[c - 1 - common] - w.s[i2]; + if (diff != 0) break; + common++; + } + if (diff < 0) { + j = k; + common_j = common; + } else { + i = k; + common_i = common; + } + if (j - i <= 1) { + if (i > 0) break; + if (j == i) break; + if (first_key_inspected) break; + first_key_inspected = true; + } + } + while (true) { + Among w = v[i]; + if (common_i >= w.s_size) { + cursor = c - w.s_size; + if (w.method == null) return w.result; - boolean res; - try { - Object resobj = w.method.invoke(w.methodobject, EMPTY_ARGS); - res = resobj.toString().equals("true"); - } catch (InvocationTargetException e) { - res = false; - // FIXME - debug message - } catch (IllegalAccessException e) { - res = false; - // FIXME - debug message - } - cursor = c - w.s_size; - if (res) return w.result; - } - i = w.substring_i; - if (i < 0) return 0; - } + boolean res; + try { + Object resobj = w.method.invoke(w.methodobject, EMPTY_ARGS); + res = resobj.toString().equals("true"); + } catch (InvocationTargetException e) { + res = false; + // FIXME - debug message + } catch (IllegalAccessException e) { + res = false; + // FIXME - debug message + } + cursor = c - w.s_size; + if (res) return w.result; + } + i = w.substring_i; + if (i < 0) return 0; + } } - /* to replace chars between c_bra and c_ket in current by the + /* to replace chars between c_bra and c_ket in current by the * chars in s. 
*/ - protected int replace_s(int c_bra, int c_ket, CharSequence s) - { - final int adjustment = s.length() - (c_ket - c_bra); - final int newLength = limit + adjustment; - //resize if necessary - if (newLength > current.length) { - char newBuffer[] = new char[ArrayUtil.oversize(newLength, RamUsageEstimator.NUM_BYTES_CHAR)]; - System.arraycopy(current, 0, newBuffer, 0, limit); - current = newBuffer; - } - // if the substring being replaced is longer or shorter than the - // replacement, need to shift things around - if (adjustment != 0 && c_ket < limit) { - System.arraycopy(current, c_ket, current, c_bra + s.length(), - limit - c_ket); - } - // insert the replacement text - // Note, faster is s.getChars(0, s.length(), current, c_bra); - // but would have to duplicate this method for both String and StringBuilder - for (int i = 0; i < s.length(); i++) - current[c_bra + i] = s.charAt(i); - - limit += adjustment; - if (cursor >= c_ket) cursor += adjustment; - else if (cursor > c_bra) cursor = c_bra; - return adjustment; + protected int replace_s(int c_bra, int c_ket, CharSequence s) { + final int adjustment = s.length() - (c_ket - c_bra); + final int newLength = limit + adjustment; + //resize if necessary + if (newLength > current.length) { + char newBuffer[] = new char[ArrayUtil.oversize(newLength, RamUsageEstimator.NUM_BYTES_CHAR)]; + System.arraycopy(current, 0, newBuffer, 0, limit); + current = newBuffer; } - - protected void slice_check() - { - if (bra < 0 || - bra > ket || - ket > limit) - { - throw new IllegalArgumentException("faulty slice operation: bra=" + bra + ",ket=" + ket + ",limit=" + limit); - // FIXME: report error somehow. - /* - fprintf(stderr, "faulty slice operation:\n"); - debug(z, -1, 0); - exit(1); - */ - } + // if the substring being replaced is longer or shorter than the + // replacement, need to shift things around + if (adjustment != 0 && c_ket < limit) { + System.arraycopy(current, c_ket, current, c_bra + s.length(), + limit - c_ket); } + // insert the replacement text + // Note, faster is s.getChars(0, s.length(), current, c_bra); + // but would have to duplicate this method for both String and StringBuilder + for (int i = 0; i < s.length(); i++) + current[c_bra + i] = s.charAt(i); - protected void slice_from(CharSequence s) - { - slice_check(); - replace_s(bra, ket, s); + limit += adjustment; + if (cursor >= c_ket) cursor += adjustment; + else if (cursor > c_bra) cursor = c_bra; + return adjustment; + } + + protected void slice_check() { + if (bra < 0 || + bra > ket || + ket > limit) { + throw new IllegalArgumentException("faulty slice operation: bra=" + bra + ",ket=" + ket + ",limit=" + limit); + // FIXME: report error somehow. 
+ /* + fprintf(stderr, "faulty slice operation:\n"); + debug(z, -1, 0); + exit(1); + */ } - - protected void slice_del() - { - slice_from((CharSequence)""); - } + } - protected void insert(int c_bra, int c_ket, CharSequence s) + protected void slice_from(CharSequence s) { + slice_check(); + replace_s(bra, ket, s); + } + + protected void slice_del() { + slice_from((CharSequence) ""); + } + + protected void insert(int c_bra, int c_ket, CharSequence s) { - int adjustment = replace_s(c_bra, c_ket, s); - if (c_bra <= bra) bra += adjustment; - if (c_bra <= ket) ket += adjustment; + int adjustment = replace_s(c_bra, c_ket, s); + if (c_bra <= bra) bra += adjustment; + if (c_bra <= ket) ket += adjustment; } /* Copy the slice into the supplied StringBuffer */ protected StringBuilder slice_to(StringBuilder s) { - slice_check(); - int len = ket - bra; - s.setLength(0); - s.append(current, bra, len); - return s; + slice_check(); + int len = ket - bra; + s.setLength(0); + s.append(current, bra, len); + return s; } protected StringBuilder assign_to(StringBuilder s) { - s.setLength(0); - s.append(current, 0, limit); - return s; + s.setLength(0); + s.append(current, 0, limit); + return s; } /* Index: lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestPhoneticFilterFactory.java =================================================================== --- lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestPhoneticFilterFactory.java (revision 1381159) +++ lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestPhoneticFilterFactory.java (working copy) @@ -175,23 +175,23 @@ } public void testSpeed() throws Exception { - checkSpeedEncoding("Metaphone", "easgasg", "ESKS"); - checkSpeedEncoding("DoubleMetaphone", "easgasg", "ASKS"); - checkSpeedEncoding("Soundex", "easgasg", "E220"); - checkSpeedEncoding("RefinedSoundex", "easgasg", "E034034"); - checkSpeedEncoding("Caverphone", "Carlene", "KLN1111111"); - checkSpeedEncoding("ColognePhonetic", "Schmitt", "862"); + checkSpeedEncoding("Metaphone", "easgasg", "ESKS"); + checkSpeedEncoding("DoubleMetaphone", "easgasg", "ASKS"); + checkSpeedEncoding("Soundex", "easgasg", "E220"); + checkSpeedEncoding("RefinedSoundex", "easgasg", "E034034"); + checkSpeedEncoding("Caverphone", "Carlene", "KLN1111111"); + checkSpeedEncoding("ColognePhonetic", "Schmitt", "862"); } private void checkSpeedEncoding(String encoder, String toBeEncoded, String estimated) throws Exception { - long start = System.currentTimeMillis(); - for ( int i=0; i - sb.append("\">"); - sb.append(originalText); - sb.append("</span>"); - return sb.toString(); + @Override + public String highlightTerm(String originalText, TokenGroup tokenGroup) { + if (tokenGroup.getTotalScore() == 0) + return originalText; + float score = tokenGroup.getTotalScore(); + if (score == 0) { + return originalText; } - // guess how much extra text we'll add to the text we're highlighting to try to avoid a StringBuilder resize - private static final String TEMPLATE = "<span style=\"background: #EEEEEE; color: #000000;\">...</span>"; - private static final int EXTRA = TEMPLATE.length(); + // try to size sb correctly + StringBuilder sb = new StringBuilder(originalText.length() + EXTRA); + + sb.append("<span style=\""); + if (highlightForeground) { + sb.append("color: "); + sb.append(getForegroundColorString(score)); + sb.append("; "); + } + if (highlightBackground) { + sb.append("background: "); + sb.append(getBackgroundColorString(score)); + sb.append("; "); + } + sb.append("\">"); + sb.append(originalText); + sb.append("</span>"); + return sb.toString(); + } + + // guess how much extra text we'll add to the text we're highlighting to try to avoid a StringBuilder resize + private static final String TEMPLATE = "<span style=\"background: #EEEEEE; color: #000000;\">...</span>"; + private static final int EXTRA = TEMPLATE.length(); }
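The highlightTerm contract shown above is simple to implement for custom markup as well: return the text unchanged when the token group did not score, otherwise wrap it. A minimal sketch against the Formatter interface (the class name and the bracket markup are illustrative, not part of this patch):

import org.apache.lucene.search.highlight.Formatter;
import org.apache.lucene.search.highlight.TokenGroup;

public class BracketFormatter implements Formatter {
  @Override
  public String highlightTerm(String originalText, TokenGroup tokenGroup) {
    // Non-matching text must pass through untouched, as the formatter above does.
    if (tokenGroup.getTotalScore() <= 0) {
      return originalText;
    }
    return "[[" + originalText + "]]";
  }
}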
Index: lucene/highlighter/src/java/org/apache/lucene/search/highlight/TextFragment.java =================================================================== --- lucene/highlighter/src/java/org/apache/lucene/search/highlight/TextFragment.java (revision 1381159) +++ lucene/highlighter/src/java/org/apache/lucene/search/highlight/TextFragment.java (working copy) @@ -25,57 +25,57 @@ */ public class TextFragment { - CharSequence markedUpText; - int fragNum; - int textStartPos; - int textEndPos; - float score; + CharSequence markedUpText; + int fragNum; + int textStartPos; + int textEndPos; + float score; - public TextFragment(CharSequence markedUpText,int textStartPos, int fragNum) - { - this.markedUpText=markedUpText; - this.textStartPos = textStartPos; - this.fragNum = fragNum; - } + public TextFragment(CharSequence markedUpText,int textStartPos, int fragNum) + { + this.markedUpText=markedUpText; + this.textStartPos = textStartPos; + this.fragNum = fragNum; + } - void setScore(float score) - { - this.score=score; - } - public float getScore() - { - return score; - } - /** - * @param frag2 Fragment to be merged into this one - */ + void setScore(float score) + { + this.score=score; + } + public float getScore() + { + return score; + } + /** + * @param frag2 Fragment to be merged into this one + */ public void merge(TextFragment frag2) { textEndPos = frag2.textEndPos; score=Math.max(score,frag2.score); } /** - * @param fragment - * @return true if this fragment follows the one passed - */ - public boolean follows(TextFragment fragment) - { - return textStartPos == fragment.textEndPos; - } + * @param fragment + * @return true if this fragment follows the one passed + */ + public boolean follows(TextFragment fragment) + { + return textStartPos == fragment.textEndPos; + } - /** - * @return the fragment sequence number - */ - public int getFragNum() - { - return fragNum; - } + /** + * @return the fragment sequence number + */ + public int getFragNum() + { + return fragNum; + } - /* Returns the marked-up text for this text fragment - */ - @Override - public String toString() { - return markedUpText.subSequence(textStartPos, textEndPos).toString(); - } + /* Returns the marked-up text for this text fragment + */ + @Override + public String toString() { + return markedUpText.subSequence(textStartPos, textEndPos).toString(); + } } Index: lucene/highlighter/src/java/org/apache/lucene/search/highlight/SimpleHTMLEncoder.java =================================================================== --- lucene/highlighter/src/java/org/apache/lucene/search/highlight/SimpleHTMLEncoder.java (revision 1381159) +++ lucene/highlighter/src/java/org/apache/lucene/search/highlight/SimpleHTMLEncoder.java (working copy) @@ -21,61 +21,61 @@ */ public class SimpleHTMLEncoder implements Encoder { - public SimpleHTMLEncoder() - { - } + public SimpleHTMLEncoder() + { + } - public String encodeText(String originalText) - { - return htmlEncode(originalText); - } - - /** - * Encode string into HTML - */ - public final static String htmlEncode(String plainText) - { - if (plainText == null || plainText.length() == 0) - { - return ""; - } + public String encodeText(String originalText) + { + return htmlEncode(originalText); + } - StringBuilder result = new StringBuilder(plainText.length()); + /** + * Encode string into HTML + */ + public final static String htmlEncode(String plainText) + { + if (plainText == null || plainText.length() == 0) + { + return ""; + } - for (int index=0; index<plainText.length(); index++) - { - char ch = plainText.charAt(index); - - switch (ch) - { - case '"': - result.append("&quot;"); - break; - - case '&': - result.append("&amp;"); - break; - - case '<': - result.append("&lt;"); - break; - - case '>': - result.append("&gt;"); - break; - - default: - if (ch < 128) - { - result.append(ch); - } - else - { - result.append("&#").append((int)ch).append(";"); - } - } - } - - return result.toString(); - } + StringBuilder result = new StringBuilder(plainText.length()); + + for (int index=0; index<plainText.length(); index++) + { + char ch = plainText.charAt(index); + + switch (ch) + { + case '"': + result.append("&quot;"); + break; + + case '&': + result.append("&amp;"); + break; + + case '<': + result.append("&lt;"); + break; + + case '>': + result.append("&gt;"); + break; + + default: + if (ch < 128) + { + result.append(ch); + } + else + { + result.append("&#").append((int)ch).append(";"); + } + } + } + + return result.toString(); + } } \ No newline at end of file Index: lucene/highlighter/src/java/org/apache/lucene/search/highlight/Encoder.java =================================================================== --- lucene/highlighter/src/java/org/apache/lucene/search/highlight/Encoder.java (revision 1381159) +++ lucene/highlighter/src/java/org/apache/lucene/search/highlight/Encoder.java (working copy) @@ -22,8 +22,8 @@ */ public interface Encoder { - /** - * @param originalText The section of text being output - */ - String encodeText(String originalText); + /** + * @param originalText The section of text being output + */ + String encodeText(String originalText); } \ No newline at end of file Index: lucene/highlighter/src/java/org/apache/lucene/search/highlight/GradientFormatter.java =================================================================== --- lucene/highlighter/src/java/org/apache/lucene/search/highlight/GradientFormatter.java (revision 1381159) +++ lucene/highlighter/src/java/org/apache/lucene/search/highlight/GradientFormatter.java (working copy) @@ -42,7 +42,7 @@ * * @param maxScore * The score (and above) displayed as maxColor (See QueryScorer.getMaxWeight - * which can be used to calibrate scoring scale) + * which can be used to calibrate scoring scale) * @param minForegroundColor * The hex color used for representing IDF scores of zero eg * #FFFFFF (white) or null if no foreground color required Index: lucene/highlighter/src/java/org/apache/lucene/search/highlight/DefaultEncoder.java =================================================================== --- lucene/highlighter/src/java/org/apache/lucene/search/highlight/DefaultEncoder.java (revision 1381159) +++ lucene/highlighter/src/java/org/apache/lucene/search/highlight/DefaultEncoder.java (working copy) @@ -21,12 +21,12 @@ */ public class DefaultEncoder implements Encoder { - public DefaultEncoder() - { - } + public DefaultEncoder() + { + } - public String encodeText(String originalText) - { - return originalText; - } + public String encodeText(String originalText) + { + return originalText; + } } \ No newline at end of file Index: lucene/highlighter/src/java/org/apache/lucene/search/highlight/WeightedTerm.java =================================================================== --- lucene/highlighter/src/java/org/apache/lucene/search/highlight/WeightedTerm.java (revision 1381159) +++ lucene/highlighter/src/java/org/apache/lucene/search/highlight/WeightedTerm.java (working copy) @@ -20,45 +20,45 @@ */ public class WeightedTerm { - float weight; // multiplier - String term; //stemmed form - public WeightedTerm (float weight,String term) - { - this.weight=weight; - this.term=term; - } - - - /** - * @return the term value (stemmed) - */ - public String getTerm() - { - return term; - } + float weight; // multiplier + String term; //stemmed form + public WeightedTerm (float weight,String term) + { + this.weight=weight; + this.term=term; + }
/** + * @return the term value (stemmed) + */ + public String getTerm() + { + return term; + } - /** - * @param weight the weight associated with this term - */ - public void setWeight(float weight) - { - this.weight = weight; - } + /** + * @return the weight associated with this term + */ + public float getWeight() + { + return weight; + } + /** + * @param term the term value (stemmed) + */ + public void setTerm(String term) + { + this.term = term; + } + + /** + * @param weight the weight associated with this term + */ + public void setWeight(float weight) + { + this.weight = weight; + } + } Index: lucene/highlighter/src/java/org/apache/lucene/search/highlight/QueryTermExtractor.java =================================================================== --- lucene/highlighter/src/java/org/apache/lucene/search/highlight/QueryTermExtractor.java (revision 1381159) +++ lucene/highlighter/src/java/org/apache/lucene/search/highlight/QueryTermExtractor.java (working copy) @@ -37,126 +37,118 @@ public final class QueryTermExtractor { - /** - * Extracts all terms texts of a given Query into an array of WeightedTerms - * - * @param query Query to extract term texts from - * @return an array of the terms used in a query, plus their weights. - */ - public static final WeightedTerm[] getTerms(Query query) - { - return getTerms(query,false); - } + /** + * Extracts all terms texts of a given Query into an array of WeightedTerms + * + * @param query Query to extract term texts from + * @return an array of the terms used in a query, plus their weights. + */ + public static final WeightedTerm[] getTerms(Query query) + { + return getTerms(query,false); + } - /** - * Extracts all terms texts of a given Query into an array of WeightedTerms - * - * @param query Query to extract term texts from - * @param reader used to compute IDF which can be used to a) score selected fragments better - * b) use graded highlights eg changing intensity of font color - * @param fieldName the field on which Inverse Document Frequency (IDF) calculations are based - * @return an array of the terms used in a query, plus their weights. - */ - public static final WeightedTerm[] getIdfWeightedTerms(Query query, IndexReader reader, String fieldName) - { - WeightedTerm[] terms=getTerms(query,false, fieldName); - int totalNumDocs=reader.maxDoc(); - for (int i = 0; i < terms.length; i++) + /** + * Extracts all terms texts of a given Query into an array of WeightedTerms + * + * @param query Query to extract term texts from + * @param reader used to compute IDF which can be used to a) score selected fragments better + * b) use graded highlights eg changing intensity of font color + * @param fieldName the field on which Inverse Document Frequency (IDF) calculations are based + * @return an array of the terms used in a query, plus their weights. 
+ */ + public static final WeightedTerm[] getIdfWeightedTerms(Query query, IndexReader reader, String fieldName) + { + WeightedTerm[] terms=getTerms(query,false, fieldName); + int totalNumDocs=reader.maxDoc(); + for (int i = 0; i < terms.length; i++) { - try + try { int docFreq=reader.docFreq(new Term(fieldName,terms[i].term)); //IDF algorithm taken from DefaultSimilarity class float idf=(float)(Math.log(totalNumDocs/(double)(docFreq+1)) + 1.0); terms[i].weight*=idf; } - catch (IOException e) + catch (IOException e) { - //ignore + //ignore } } - return terms; - } + return terms; + } - /** - * Extracts all terms texts of a given Query into an array of WeightedTerms - * - * @param query Query to extract term texts from - * @param prohibited true to extract "prohibited" terms, too - * @param fieldName The fieldName used to filter query terms + /** + * Extracts all terms texts of a given Query into an array of WeightedTerms + * + * @param query Query to extract term texts from + * @param prohibited true to extract "prohibited" terms, too + * @param fieldName The fieldName used to filter query terms * @return an array of the terms used in a query, plus their weights. */ - public static final WeightedTerm[] getTerms(Query query, boolean prohibited, String fieldName) - { - HashSet terms=new HashSet(); - getTerms(query,terms,prohibited,fieldName); - return terms.toArray(new WeightedTerm[0]); - } - - /** - * Extracts all terms texts of a given Query into an array of WeightedTerms - * - * @param query Query to extract term texts from - * @param prohibited true to extract "prohibited" terms, too + public static final WeightedTerm[] getTerms(Query query, boolean prohibited, String fieldName) + { + HashSet terms=new HashSet(); + getTerms(query,terms,prohibited,fieldName); + return terms.toArray(new WeightedTerm[0]); + } + + /** + * Extracts all terms texts of a given Query into an array of WeightedTerms + * + * @param query Query to extract term texts from + * @param prohibited true to extract "prohibited" terms, too * @return an array of the terms used in a query, plus their weights. 
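+ * For example, for a BooleanQuery with the clauses foo and bar^2, this returns WeightedTerms for "foo" (weight 1.0) and "bar" (weight 2.0).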
*/ - public static final WeightedTerm[] getTerms(Query query, boolean prohibited) - { - return getTerms(query,prohibited,null); - } + public static final WeightedTerm[] getTerms(Query query, boolean prohibited) + { + return getTerms(query,prohibited,null); + } - private static final void getTerms(Query query, HashSet terms,boolean prohibited, String fieldName) - { - try - { - if (query instanceof BooleanQuery) - getTermsFromBooleanQuery((BooleanQuery) query, terms, prohibited, fieldName); - else - if(query instanceof FilteredQuery) - getTermsFromFilteredQuery((FilteredQuery)query, terms,prohibited, fieldName); - else - { - HashSet nonWeightedTerms=new HashSet(); - query.extractTerms(nonWeightedTerms); - for (Iterator iter = nonWeightedTerms.iterator(); iter.hasNext();) - { - Term term = iter.next(); - if((fieldName==null)||(term.field().equals(fieldName))) - { - terms.add(new WeightedTerm(query.getBoost(),term.text())); - } - } - } - } - catch(UnsupportedOperationException ignore) - { - //this is non-fatal for our purposes - } - } + private static final void getTerms(Query query, HashSet terms, boolean prohibited, String fieldName) { + try { + if (query instanceof BooleanQuery) + getTermsFromBooleanQuery((BooleanQuery) query, terms, prohibited, fieldName); + else if (query instanceof FilteredQuery) + getTermsFromFilteredQuery((FilteredQuery) query, terms, prohibited, fieldName); + else { + HashSet nonWeightedTerms = new HashSet(); + query.extractTerms(nonWeightedTerms); + for (Iterator iter = nonWeightedTerms.iterator(); iter.hasNext(); ) { + Term term = iter.next(); + if ((fieldName == null) || (term.field().equals(fieldName))) { + terms.add(new WeightedTerm(query.getBoost(), term.text())); + } + } + } + } catch (UnsupportedOperationException ignore) { + //this is non-fatal for our purposes + } + } - /** - * extractTerms is currently the only query-independent means of introspecting queries but it only reveals - * a list of terms for that query - not the boosts each individual term in that query may or may not have. - * "Container" queries such as BooleanQuery should be unwrapped to get at the boost info held - * in each child element. - * Some discussion around this topic here: - * http://www.gossamer-threads.com/lists/lucene/java-dev/34208?search_string=introspection;#34208 - * Unfortunately there seemed to be limited interest in requiring all Query objects to implement - * something common which would allow access to child queries so what follows here are query-specific - * implementations for accessing embedded query elements. - */ - private static final void getTermsFromBooleanQuery(BooleanQuery query, HashSet terms, boolean prohibited, String fieldName) - { - BooleanClause[] queryClauses = query.getClauses(); - for (int i = 0; i < queryClauses.length; i++) - { - if (prohibited || queryClauses[i].getOccur()!=BooleanClause.Occur.MUST_NOT) - getTerms(queryClauses[i].getQuery(), terms, prohibited, fieldName); - } - } - private static void getTermsFromFilteredQuery(FilteredQuery query, HashSet terms, boolean prohibited, String fieldName) - { - getTerms(query.getQuery(),terms,prohibited,fieldName); - } - + /** + * extractTerms is currently the only query-independent means of introspecting queries but it only reveals + * a list of terms for that query - not the boosts each individual term in that query may or may not have. + * "Container" queries such as BooleanQuery should be unwrapped to get at the boost info held + * in each child element. 
+ * Some discussion around this topic here: + * http://www.gossamer-threads.com/lists/lucene/java-dev/34208?search_string=introspection;#34208 + * Unfortunately there seemed to be limited interest in requiring all Query objects to implement + * something common which would allow access to child queries so what follows here are query-specific + * implementations for accessing embedded query elements. + */ + private static final void getTermsFromBooleanQuery(BooleanQuery query, HashSet terms, boolean prohibited, String fieldName) + { + BooleanClause[] queryClauses = query.getClauses(); + for (int i = 0; i < queryClauses.length; i++) + { + if (prohibited || queryClauses[i].getOccur()!=BooleanClause.Occur.MUST_NOT) + getTerms(queryClauses[i].getQuery(), terms, prohibited, fieldName); + } + } + private static void getTermsFromFilteredQuery(FilteredQuery query, HashSet terms, boolean prohibited, String fieldName) + { + getTerms(query.getQuery(),terms,prohibited,fieldName); + } + } Index: lucene/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java =================================================================== --- lucene/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java (revision 1381159) +++ lucene/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java (working copy) @@ -38,446 +38,446 @@ public static final int DEFAULT_MAX_CHARS_TO_ANALYZE = 50*1024; private int maxDocCharsToAnalyze = DEFAULT_MAX_CHARS_TO_ANALYZE; - private Formatter formatter; - private Encoder encoder; - private Fragmenter textFragmenter=new SimpleFragmenter(); - private Scorer fragmentScorer=null; + private Formatter formatter; + private Encoder encoder; + private Fragmenter textFragmenter=new SimpleFragmenter(); + private Scorer fragmentScorer=null; - public Highlighter(Scorer fragmentScorer) - { - this(new SimpleHTMLFormatter(),fragmentScorer); - } + public Highlighter(Scorer fragmentScorer) + { + this(new SimpleHTMLFormatter(),fragmentScorer); + } - public Highlighter(Formatter formatter, Scorer fragmentScorer) - { - this(formatter,new DefaultEncoder(),fragmentScorer); - } + public Highlighter(Formatter formatter, Scorer fragmentScorer) + { + this(formatter,new DefaultEncoder(),fragmentScorer); + } - public Highlighter(Formatter formatter, Encoder encoder, Scorer fragmentScorer) - { - this.formatter = formatter; - this.encoder = encoder; - this.fragmentScorer = fragmentScorer; - } + public Highlighter(Formatter formatter, Encoder encoder, Scorer fragmentScorer) + { + this.formatter = formatter; + this.encoder = encoder; + this.fragmentScorer = fragmentScorer; + } - /** - * Highlights chosen terms in a text, extracting the most relevant section. 
- * This is a convenience method that calls - * {@link #getBestFragment(TokenStream, String)} - * - * @param analyzer the analyzer that will be used to split text - * into chunks - * @param text text to highlight terms in - * @param fieldName Name of field used to influence analyzer's tokenization policy - * - * @return highlighted text fragment or null if no terms found - * @throws InvalidTokenOffsetsException thrown if any token's endOffset exceeds the provided text's length - */ - public final String getBestFragment(Analyzer analyzer, String fieldName,String text) - throws IOException, InvalidTokenOffsetsException - { - TokenStream tokenStream = analyzer.tokenStream(fieldName, new StringReader(text)); - return getBestFragment(tokenStream, text); - } + /** + * Highlights chosen terms in a text, extracting the most relevant section. + * This is a convenience method that calls + * {@link #getBestFragment(TokenStream, String)} + * + * @param analyzer the analyzer that will be used to split text + * into chunks + * @param text text to highlight terms in + * @param fieldName Name of field used to influence analyzer's tokenization policy + * + * @return highlighted text fragment or null if no terms found + * @throws InvalidTokenOffsetsException thrown if any token's endOffset exceeds the provided text's length + */ + public final String getBestFragment(Analyzer analyzer, String fieldName,String text) + throws IOException, InvalidTokenOffsetsException + { + TokenStream tokenStream = analyzer.tokenStream(fieldName, new StringReader(text)); + return getBestFragment(tokenStream, text); + } - /** - * Highlights chosen terms in a text, extracting the most relevant section. - * The document text is analysed in chunks to record hit statistics - * across the document. After accumulating stats, the fragment with the highest score - * is returned - * - * @param tokenStream a stream of tokens identified in the text parameter, including offset information. - * This is typically produced by an analyzer re-parsing a document's - * text. Some work may be done on retrieving TokenStreams more efficiently - * by adding support for storing original text position data in the Lucene - * index but this support is not currently available (as of Lucene 1.4 rc2). - * @param text text to highlight terms in - * - * @return highlighted text fragment or null if no terms found - * @throws InvalidTokenOffsetsException thrown if any token's endOffset exceeds the provided text's length - */ - public final String getBestFragment(TokenStream tokenStream, String text) - throws IOException, InvalidTokenOffsetsException - { - String[] results = getBestFragments(tokenStream,text, 1); - if (results.length > 0) - { - return results[0]; - } - return null; - } + /** + * Highlights chosen terms in a text, extracting the most relevant section. + * The document text is analysed in chunks to record hit statistics + * across the document. After accumulating stats, the fragment with the highest score + * is returned + * + * @param tokenStream a stream of tokens identified in the text parameter, including offset information. + * This is typically produced by an analyzer re-parsing a document's + * text. Some work may be done on retrieving TokenStreams more efficiently + * by adding support for storing original text position data in the Lucene + * index but this support is not currently available (as of Lucene 1.4 rc2). 
+ * @param text text to highlight terms in + * + * @return highlighted text fragment or null if no terms found + * @throws InvalidTokenOffsetsException thrown if any token's endOffset exceeds the provided text's length + */ + public final String getBestFragment(TokenStream tokenStream, String text) + throws IOException, InvalidTokenOffsetsException + { + String[] results = getBestFragments(tokenStream,text, 1); + if (results.length > 0) + { + return results[0]; + } + return null; + } - /** - * Highlights chosen terms in a text, extracting the most relevant sections. - * This is a convenience method that calls - * {@link #getBestFragments(TokenStream, String, int)} - * - * @param analyzer the analyzer that will be used to split text - * into chunks - * @param fieldName the name of the field being highlighted (used by analyzer) - * @param text text to highlight terms in - * @param maxNumFragments the maximum number of fragments. - * - * @return highlighted text fragments (between 0 and maxNumFragments number of fragments) - * @throws InvalidTokenOffsetsException thrown if any token's endOffset exceeds the provided text's length - */ - public final String[] getBestFragments( - Analyzer analyzer, - String fieldName, - String text, - int maxNumFragments) - throws IOException, InvalidTokenOffsetsException - { - TokenStream tokenStream = analyzer.tokenStream(fieldName, new StringReader(text)); - return getBestFragments(tokenStream, text, maxNumFragments); - } + /** + * Highlights chosen terms in a text, extracting the most relevant sections. + * This is a convenience method that calls + * {@link #getBestFragments(TokenStream, String, int)} + * + * @param analyzer the analyzer that will be used to split text + * into chunks + * @param fieldName the name of the field being highlighted (used by analyzer) + * @param text text to highlight terms in + * @param maxNumFragments the maximum number of fragments. + * + * @return highlighted text fragments (between 0 and maxNumFragments number of fragments) + * @throws InvalidTokenOffsetsException thrown if any token's endOffset exceeds the provided text's length + */ + public final String[] getBestFragments( + Analyzer analyzer, + String fieldName, + String text, + int maxNumFragments) + throws IOException, InvalidTokenOffsetsException + { + TokenStream tokenStream = analyzer.tokenStream(fieldName, new StringReader(text)); + return getBestFragments(tokenStream, text, maxNumFragments); + } - /** - * Highlights chosen terms in a text, extracting the most relevant sections. - * The document text is analysed in chunks to record hit statistics - * across the document. After accumulating stats, the fragments with the highest scores - * are returned as an array of strings in order of score (contiguous fragments are merged into - * one in their original order to improve readability) - * - * @param text text to highlight terms in - * @param maxNumFragments the maximum number of fragments. - * - * @return highlighted text fragments (between 0 and maxNumFragments number of fragments) - * @throws InvalidTokenOffsetsException thrown if any token's endOffset exceeds the provided text's length - */ - public final String[] getBestFragments( - TokenStream tokenStream, - String text, - int maxNumFragments) - throws IOException, InvalidTokenOffsetsException - { - maxNumFragments = Math.max(1, maxNumFragments); //sanity check + /** + * Highlights chosen terms in a text, extracting the most relevant sections. 
+ * The document text is analysed in chunks to record hit statistics + * across the document. After accumulating stats, the fragments with the highest scores + * are returned as an array of strings in order of score (contiguous fragments are merged into + * one in their original order to improve readability) + * + * @param text text to highlight terms in + * @param maxNumFragments the maximum number of fragments. + * + * @return highlighted text fragments (between 0 and maxNumFragments number of fragments) + * @throws InvalidTokenOffsetsException thrown if any token's endOffset exceeds the provided text's length + */ + public final String[] getBestFragments( + TokenStream tokenStream, + String text, + int maxNumFragments) + throws IOException, InvalidTokenOffsetsException + { + maxNumFragments = Math.max(1, maxNumFragments); //sanity check - TextFragment[] frag =getBestTextFragments(tokenStream,text, true,maxNumFragments); + TextFragment[] frag =getBestTextFragments(tokenStream,text, true,maxNumFragments); - //Get text - ArrayList fragTexts = new ArrayList(); - for (int i = 0; i < frag.length; i++) - { - if ((frag[i] != null) && (frag[i].getScore() > 0)) - { - fragTexts.add(frag[i].toString()); - } - } - return fragTexts.toArray(new String[0]); - } + //Get text + ArrayList fragTexts = new ArrayList(); + for (int i = 0; i < frag.length; i++) + { + if ((frag[i] != null) && (frag[i].getScore() > 0)) + { + fragTexts.add(frag[i].toString()); + } + } + return fragTexts.toArray(new String[0]); + } - /** - * Low level api to get the most relevant (formatted) sections of the document. - * This method has been made public to allow visibility of score information held in TextFragment objects. - * Thanks to Jason Calabrese for help in redefining the interface. - * @param tokenStream - * @param text - * @param maxNumFragments - * @param mergeContiguousFragments - * @throws IOException - * @throws InvalidTokenOffsetsException thrown if any token's endOffset exceeds the provided text's length - */ - public final TextFragment[] getBestTextFragments( - TokenStream tokenStream, - String text, - boolean mergeContiguousFragments, - int maxNumFragments) - throws IOException, InvalidTokenOffsetsException - { - ArrayList docFrags = new ArrayList(); - StringBuilder newText=new StringBuilder(); - - CharTermAttribute termAtt = tokenStream.addAttribute(CharTermAttribute.class); - OffsetAttribute offsetAtt = tokenStream.addAttribute(OffsetAttribute.class); - tokenStream.addAttribute(PositionIncrementAttribute.class); - tokenStream.reset(); - - TextFragment currentFrag = new TextFragment(newText,newText.length(), docFrags.size()); - + /** + * Low level api to get the most relevant (formatted) sections of the document. + * This method has been made public to allow visibility of score information held in TextFragment objects. + * Thanks to Jason Calabrese for help in redefining the interface. 
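+ * A typical call re-analyzes the stored text and keeps the scoring fragments; a minimal
+ * sketch (the analyzer, field name and fragment count here are illustrative):
+ * <pre>
+ *   TokenStream tokenStream = analyzer.tokenStream("body", new StringReader(text));
+ *   TextFragment[] frags = highlighter.getBestTextFragments(tokenStream, text, false, 5);
+ *   for (int i = 0; i < frags.length; i++) {
+ *     if ((frags[i] != null) && (frags[i].getScore() > 0)) {
+ *       System.out.println(frags[i].getScore() + ": " + frags[i]);
+ *     }
+ *   }
+ * </pre>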
+ * @param tokenStream + * @param text + * @param maxNumFragments + * @param mergeContiguousFragments + * @throws IOException + * @throws InvalidTokenOffsetsException thrown if any token's endOffset exceeds the provided text's length + */ + public final TextFragment[] getBestTextFragments( + TokenStream tokenStream, + String text, + boolean mergeContiguousFragments, + int maxNumFragments) + throws IOException, InvalidTokenOffsetsException + { + ArrayList docFrags = new ArrayList(); + StringBuilder newText=new StringBuilder(); + + CharTermAttribute termAtt = tokenStream.addAttribute(CharTermAttribute.class); + OffsetAttribute offsetAtt = tokenStream.addAttribute(OffsetAttribute.class); + tokenStream.addAttribute(PositionIncrementAttribute.class); + tokenStream.reset(); + + TextFragment currentFrag = new TextFragment(newText,newText.length(), docFrags.size()); + if (fragmentScorer instanceof QueryScorer) { ((QueryScorer) fragmentScorer).setMaxDocCharsToAnalyze(maxDocCharsToAnalyze); } - TokenStream newStream = fragmentScorer.init(tokenStream); - if(newStream != null) { - tokenStream = newStream; - } - fragmentScorer.startFragment(currentFrag); - docFrags.add(currentFrag); + TokenStream newStream = fragmentScorer.init(tokenStream); + if(newStream != null) { + tokenStream = newStream; + } + fragmentScorer.startFragment(currentFrag); + docFrags.add(currentFrag); - FragmentQueue fragQueue = new FragmentQueue(maxNumFragments); + FragmentQueue fragQueue = new FragmentQueue(maxNumFragments); - try - { + try + { - String tokenText; - int startOffset; - int endOffset; - int lastEndOffset = 0; - textFragmenter.start(text, tokenStream); + String tokenText; + int startOffset; + int endOffset; + int lastEndOffset = 0; + textFragmenter.start(text, tokenStream); - TokenGroup tokenGroup=new TokenGroup(tokenStream); + TokenGroup tokenGroup=new TokenGroup(tokenStream); - for (boolean next = tokenStream.incrementToken(); next && (offsetAtt.startOffset()< maxDocCharsToAnalyze); - next = tokenStream.incrementToken()) - { - if( (offsetAtt.endOffset()>text.length()) - || - (offsetAtt.startOffset()>text.length()) - ) - { - throw new InvalidTokenOffsetsException("Token "+ termAtt.toString() - +" exceeds length of provided text sized "+text.length()); - } - if((tokenGroup.numTokens>0)&&(tokenGroup.isDistinct())) - { - //the current token is distinct from previous tokens - - // markup the cached token group info - startOffset = tokenGroup.matchStartOffset; - endOffset = tokenGroup.matchEndOffset; - tokenText = text.substring(startOffset, endOffset); - String markedUpText=formatter.highlightTerm(encoder.encodeText(tokenText), tokenGroup); - //store any whitespace etc from between this and last group - if (startOffset > lastEndOffset) - newText.append(encoder.encodeText(text.substring(lastEndOffset, startOffset))); - newText.append(markedUpText); - lastEndOffset=Math.max(endOffset, lastEndOffset); - tokenGroup.clear(); + for (boolean next = tokenStream.incrementToken(); next && (offsetAtt.startOffset()< maxDocCharsToAnalyze); + next = tokenStream.incrementToken()) + { + if( (offsetAtt.endOffset()>text.length()) + || + (offsetAtt.startOffset()>text.length()) + ) + { + throw new InvalidTokenOffsetsException("Token "+ termAtt.toString() + +" exceeds length of provided text sized "+text.length()); + } + if((tokenGroup.numTokens>0)&&(tokenGroup.isDistinct())) + { + //the current token is distinct from previous tokens - + // markup the cached token group info + startOffset = tokenGroup.matchStartOffset; + endOffset = 
tokenGroup.matchEndOffset; + tokenText = text.substring(startOffset, endOffset); + String markedUpText=formatter.highlightTerm(encoder.encodeText(tokenText), tokenGroup); + //store any whitespace etc from between this and last group + if (startOffset > lastEndOffset) + newText.append(encoder.encodeText(text.substring(lastEndOffset, startOffset))); + newText.append(markedUpText); + lastEndOffset=Math.max(endOffset, lastEndOffset); + tokenGroup.clear(); - //check if current token marks the start of a new fragment - if(textFragmenter.isNewFragment()) - { - currentFrag.setScore(fragmentScorer.getFragmentScore()); - //record stats for a new fragment - currentFrag.textEndPos = newText.length(); - currentFrag =new TextFragment(newText, newText.length(), docFrags.size()); - fragmentScorer.startFragment(currentFrag); - docFrags.add(currentFrag); - } - } + //check if current token marks the start of a new fragment + if(textFragmenter.isNewFragment()) + { + currentFrag.setScore(fragmentScorer.getFragmentScore()); + //record stats for a new fragment + currentFrag.textEndPos = newText.length(); + currentFrag =new TextFragment(newText, newText.length(), docFrags.size()); + fragmentScorer.startFragment(currentFrag); + docFrags.add(currentFrag); + } + } - tokenGroup.addToken(fragmentScorer.getTokenScore()); + tokenGroup.addToken(fragmentScorer.getTokenScore()); -// if(lastEndOffset>maxDocBytesToAnalyze) -// { -// break; -// } - } - currentFrag.setScore(fragmentScorer.getFragmentScore()); +// if(lastEndOffset>maxDocBytesToAnalyze) +// { +// break; +// } + } + currentFrag.setScore(fragmentScorer.getFragmentScore()); - if(tokenGroup.numTokens>0) - { - //flush the accumulated text (same code as in above loop) - startOffset = tokenGroup.matchStartOffset; - endOffset = tokenGroup.matchEndOffset; - tokenText = text.substring(startOffset, endOffset); - String markedUpText=formatter.highlightTerm(encoder.encodeText(tokenText), tokenGroup); - //store any whitespace etc from between this and last group - if (startOffset > lastEndOffset) - newText.append(encoder.encodeText(text.substring(lastEndOffset, startOffset))); - newText.append(markedUpText); - lastEndOffset=Math.max(lastEndOffset,endOffset); - } + if(tokenGroup.numTokens>0) + { + //flush the accumulated text (same code as in above loop) + startOffset = tokenGroup.matchStartOffset; + endOffset = tokenGroup.matchEndOffset; + tokenText = text.substring(startOffset, endOffset); + String markedUpText=formatter.highlightTerm(encoder.encodeText(tokenText), tokenGroup); + //store any whitespace etc from between this and last group + if (startOffset > lastEndOffset) + newText.append(encoder.encodeText(text.substring(lastEndOffset, startOffset))); + newText.append(markedUpText); + lastEndOffset=Math.max(lastEndOffset,endOffset); + } - //Test what remains of the original text beyond the point where we stopped analyzing - if ( -// if there is text beyond the last token considered.. - (lastEndOffset < text.length()) - && -// and that text is not too large... - (text.length()<= maxDocCharsToAnalyze) - ) - { - //append it to the last fragment - newText.append(encoder.encodeText(text.substring(lastEndOffset))); - } + //Test what remains of the original text beyond the point where we stopped analyzing + if ( +// if there is text beyond the last token considered.. + (lastEndOffset < text.length()) + && +// and that text is not too large... 
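+// i.e. the whole text fit within the maxDocCharsToAnalyze limit, so analysis was not cut short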
+ (text.length()<= maxDocCharsToAnalyze)
+ )
+ {
+ //append it to the last fragment
+ newText.append(encoder.encodeText(text.substring(lastEndOffset)));
+ }
- currentFrag.textEndPos = newText.length();
+ currentFrag.textEndPos = newText.length();
- //sort the most relevant sections of the text
- for (Iterator<TextFragment> i = docFrags.iterator(); i.hasNext();)
- {
- currentFrag = i.next();
+ //sort the most relevant sections of the text
+ for (Iterator<TextFragment> i = docFrags.iterator(); i.hasNext();)
+ {
+ currentFrag = i.next();
- //If you are running with a version of Lucene before 11th Sept 03
- // you do not have PriorityQueue.insert() - so uncomment the code below
- /*
- if (currentFrag.getScore() >= minScore)
- {
- fragQueue.put(currentFrag);
- if (fragQueue.size() > maxNumFragments)
- { // if hit queue overfull
- fragQueue.pop(); // remove lowest in hit queue
- minScore = ((TextFragment) fragQueue.top()).getScore(); // reset minScore
- }
+ //If you are running with a version of Lucene before 11th Sept 03
+ // you do not have PriorityQueue.insert() - so uncomment the code below
+ /*
+ if (currentFrag.getScore() >= minScore)
+ {
+ fragQueue.put(currentFrag);
+ if (fragQueue.size() > maxNumFragments)
+ { // if hit queue overfull
+ fragQueue.pop(); // remove lowest in hit queue
+ minScore = ((TextFragment) fragQueue.top()).getScore(); // reset minScore
+ }
- }
- */
- //The above code caused a problem as a result of Christoph Goller's 11th Sept 03
- //fix to PriorityQueue. The correct method to use here is the new "insert" method
- // USE ABOVE CODE IF THIS DOES NOT COMPILE!
- fragQueue.insertWithOverflow(currentFrag);
- }
+ }
+ */
+ //The above code caused a problem as a result of Christoph Goller's 11th Sept 03
+ //fix to PriorityQueue. The correct method to use here is the new "insert" method
+ // USE ABOVE CODE IF THIS DOES NOT COMPILE!
+ fragQueue.insertWithOverflow(currentFrag);
+ }
- //return the most relevant fragments
- TextFragment frag[] = new TextFragment[fragQueue.size()];
- for (int i = frag.length - 1; i >= 0; i--)
- {
- frag[i] = fragQueue.pop();
- }
+ //return the most relevant fragments
+ TextFragment frag[] = new TextFragment[fragQueue.size()];
+ for (int i = frag.length - 1; i >= 0; i--)
+ {
+ frag[i] = fragQueue.pop();
+ }
- //merge any contiguous fragments to improve readability
- if(mergeContiguousFragments)
- {
- mergeContiguousFragments(frag);
- ArrayList<TextFragment> fragTexts = new ArrayList<TextFragment>();
- for (int i = 0; i < frag.length; i++)
- {
- if ((frag[i] != null) && (frag[i].getScore() > 0))
- {
- fragTexts.add(frag[i]);
- }
- }
- frag= fragTexts.toArray(new TextFragment[0]);
- }
+ //merge any contiguous fragments to improve readability
+ if(mergeContiguousFragments)
+ {
+ mergeContiguousFragments(frag);
+ ArrayList<TextFragment> fragTexts = new ArrayList<TextFragment>();
+ for (int i = 0; i < frag.length; i++)
+ {
+ if ((frag[i] != null) && (frag[i].getScore() > 0))
+ {
+ fragTexts.add(frag[i]);
+ }
+ }
+ frag = fragTexts.toArray(new TextFragment[0]);
+ }
- return frag;
+ return frag;
- }
- finally
- {
- if (tokenStream != null)
- {
- try
- {
- tokenStream.end();
- tokenStream.close();
- }
- catch (Exception e)
- {
- }
- }
- }
- }
+ }
+ finally
+ {
+ if (tokenStream != null)
+ {
+ try
+ {
+ tokenStream.end();
+ tokenStream.close();
+ }
+ catch (Exception e)
+ {
+ // ignore: a failure while closing the stream should not mask the highlighting result
+ }
+ }
+ }
+ }
- /** Improves readability of a score-sorted list of TextFragments by merging any fragments
- * that were contiguous in the original text into one larger fragment with the correct order.
- * This will leave a "null" in the array entry for the lesser scored fragment. - * - * @param frag An array of document fragments in descending score - */ - private void mergeContiguousFragments(TextFragment[] frag) - { - boolean mergingStillBeingDone; - if (frag.length > 1) - do - { - mergingStillBeingDone = false; //initialise loop control flag - //for each fragment, scan other frags looking for contiguous blocks - for (int i = 0; i < frag.length; i++) - { - if (frag[i] == null) - { - continue; - } - //merge any contiguous blocks - for (int x = 0; x < frag.length; x++) - { - if (frag[x] == null) - { - continue; - } - if (frag[i] == null) - { - break; - } - TextFragment frag1 = null; - TextFragment frag2 = null; - int frag1Num = 0; - int frag2Num = 0; - int bestScoringFragNum; - int worstScoringFragNum; - //if blocks are contiguous.... - if (frag[i].follows(frag[x])) - { - frag1 = frag[x]; - frag1Num = x; - frag2 = frag[i]; - frag2Num = i; - } - else - if (frag[x].follows(frag[i])) - { - frag1 = frag[i]; - frag1Num = i; - frag2 = frag[x]; - frag2Num = x; - } - //merging required.. - if (frag1 != null) - { - if (frag1.getScore() > frag2.getScore()) - { - bestScoringFragNum = frag1Num; - worstScoringFragNum = frag2Num; - } - else - { - bestScoringFragNum = frag2Num; - worstScoringFragNum = frag1Num; - } - frag1.merge(frag2); - frag[worstScoringFragNum] = null; - mergingStillBeingDone = true; - frag[bestScoringFragNum] = frag1; - } - } - } - } - while (mergingStillBeingDone); - } - - - /** - * Highlights terms in the text , extracting the most relevant sections - * and concatenating the chosen fragments with a separator (typically "..."). - * The document text is analysed in chunks to record hit statistics - * across the document. After accumulating stats, the fragments with the highest scores - * are returned in order as "separator" delimited strings. - * - * @param text text to highlight terms in - * @param maxNumFragments the maximum number of fragments. - * @param separator the separator used to intersperse the document fragments (typically "...") - * - * @return highlighted text - * @throws InvalidTokenOffsetsException thrown if any token's endOffset exceeds the provided text's length - */ - public final String getBestFragments( - TokenStream tokenStream, - String text, - int maxNumFragments, - String separator) - throws IOException, InvalidTokenOffsetsException - { - String sections[] = getBestFragments(tokenStream,text, maxNumFragments); - StringBuilder result = new StringBuilder(); - for (int i = 0; i < sections.length; i++) - { - if (i > 0) - { - result.append(separator); - } - result.append(sections[i]); - } - return result.toString(); - } + /** Improves readability of a score-sorted list of TextFragments by merging any fragments + * that were contiguous in the original text into one larger fragment with the correct order. + * This will leave a "null" in the array entry for the lesser scored fragment. 
+ *
+ * @param frag An array of document fragments in descending score
+ */
+ private void mergeContiguousFragments(TextFragment[] frag)
+ {
+ boolean mergingStillBeingDone;
+ if (frag.length > 1)
+ do
+ {
+ mergingStillBeingDone = false; //initialise loop control flag
+ //for each fragment, scan other frags looking for contiguous blocks
+ for (int i = 0; i < frag.length; i++)
+ {
+ if (frag[i] == null)
+ {
+ continue;
+ }
+ //merge any contiguous blocks
+ for (int x = 0; x < frag.length; x++)
+ {
+ if (frag[x] == null)
+ {
+ continue;
+ }
+ if (frag[i] == null)
+ {
+ break;
+ }
+ TextFragment frag1 = null;
+ TextFragment frag2 = null;
+ int frag1Num = 0;
+ int frag2Num = 0;
+ int bestScoringFragNum;
+ int worstScoringFragNum;
+ //if blocks are contiguous....
+ if (frag[i].follows(frag[x]))
+ {
+ frag1 = frag[x];
+ frag1Num = x;
+ frag2 = frag[i];
+ frag2Num = i;
+ }
+ else
+ if (frag[x].follows(frag[i]))
+ {
+ frag1 = frag[i];
+ frag1Num = i;
+ frag2 = frag[x];
+ frag2Num = x;
+ }
+ //merging required..
+ if (frag1 != null)
+ {
+ if (frag1.getScore() > frag2.getScore())
+ {
+ bestScoringFragNum = frag1Num;
+ worstScoringFragNum = frag2Num;
+ }
+ else
+ {
+ bestScoringFragNum = frag2Num;
+ worstScoringFragNum = frag1Num;
+ }
+ frag1.merge(frag2);
+ frag[worstScoringFragNum] = null;
+ mergingStillBeingDone = true;
+ frag[bestScoringFragNum] = frag1;
+ }
+ }
+ }
+ }
+ while (mergingStillBeingDone);
+ }
+
+ /**
+ * Highlights terms in the text, extracting the most relevant sections
+ * and concatenating the chosen fragments with a separator (typically "...").
+ * The document text is analysed in chunks to record hit statistics
+ * across the document. After accumulating stats, the fragments with the highest scores
+ * are returned in order as "separator" delimited strings.
+ *
+ * @param text text to highlight terms in
+ * @param maxNumFragments the maximum number of fragments.
+ * @param separator the separator used to intersperse the document fragments (typically "...")
+ *
+ * @return highlighted text
+ * @throws InvalidTokenOffsetsException thrown if any token's endOffset exceeds the provided text's length
+ */
+ public final String getBestFragments(
+ TokenStream tokenStream,
+ String text,
+ int maxNumFragments,
+ String separator)
+ throws IOException, InvalidTokenOffsetsException
+ {
+ String[] sections = getBestFragments(tokenStream, text, maxNumFragments);
+ StringBuilder result = new StringBuilder();
+ for (int i = 0; i < sections.length; i++)
+ {
+ if (i > 0)
+ {
+ result.append(separator);
+ }
+ result.append(sections[i]);
+ }
+ return result.toString();
+ }
+
 public int getMaxDocCharsToAnalyze() {
 return maxDocCharsToAnalyze;
 }
@@ -487,35 +487,35 @@
 }
- public Fragmenter getTextFragmenter()
- {
- return textFragmenter;
- }
+ public Fragmenter getTextFragmenter()
+ {
+ return textFragmenter;
+ }
- /**
- * @param fragmenter
- */
- public void setTextFragmenter(Fragmenter fragmenter)
- {
- textFragmenter = fragmenter;
- }
+ /**
+ * @param fragmenter the Fragmenter used to split the text into highlightable sections
+ */
+ public void setTextFragmenter(Fragmenter fragmenter)
+ {
+ textFragmenter = fragmenter;
+ }
- /**
- * @return Object used to score each text fragment
- */
- public Scorer getFragmentScorer()
- {
- return fragmentScorer;
- }
+ /**
+ * @return Object used to score each text fragment
+ */
+ public Scorer getFragmentScorer()
+ {
+ return fragmentScorer;
+ }
- /**
- * @param scorer
- */
- public void setFragmentScorer(Scorer scorer)
- {
- fragmentScorer = scorer;
- }
+ /**
+ * @param scorer the Scorer used to rate each text fragment
+ */
+ public void setFragmentScorer(Scorer scorer)
+ {
+ fragmentScorer = scorer;
+ }
 public Encoder getEncoder()
 {
@@ -528,17 +528,17 @@
 }
 class FragmentQueue extends PriorityQueue<TextFragment> {
- public FragmentQueue(int size)
- {
- super(size);
- }
+ public FragmentQueue(int size)
+ {
+ super(size);
+ }
- @Override
- public final boolean lessThan(TextFragment fragA, TextFragment fragB)
- {
- if (fragA.getScore() == fragB.getScore())
- return fragA.fragNum > fragB.fragNum;
- else
- return fragA.getScore() < fragB.getScore();
- }
+ @Override
+ public final boolean lessThan(TextFragment fragA, TextFragment fragB)
+ {
+ if (fragA.getScore() == fragB.getScore())
+ return fragA.fragNum > fragB.fragNum;
+ else
+ return fragA.getScore() < fragB.getScore();
+ }
 }
Index: lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowFuzzyQuery.java
===================================================================
--- lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowFuzzyQuery.java (revision 1381159)
+++ lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowFuzzyQuery.java (working copy)
@@ -105,7 +105,7 @@
 }
 // not similar enough:
- query = new SlowFuzzyQuery(new Term("field", "xxxxx"), SlowFuzzyQuery.defaultMinSimilarity, 0);
+ query = new SlowFuzzyQuery(new Term("field", "xxxxx"), SlowFuzzyQuery.defaultMinSimilarity, 0);
 hits = searcher.search(query, null, 1000).scoreDocs;
 assertEquals(0, hits.length);
 query = new SlowFuzzyQuery(new Term("field", "aaccc"), SlowFuzzyQuery.defaultMinSimilarity, 0); // edit distance to "aaaaa" = 3
Index: lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java
===================================================================
--- lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java (revision 1381159)
+++ lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java (working copy)
@@ -121,17 +121,17 @@
 class FieldVals
 {
- String queryString;
- String fieldName;
- float minSimilarity;
- int prefixLength;
- public FieldVals(String name, float similarity, int length, String queryString)
- {
- fieldName = name;
- minSimilarity = similarity;
- prefixLength = length;
- this.queryString = queryString;
- }
+ String queryString;
+ String fieldName;
+ float minSimilarity;
+ int prefixLength;
+ public FieldVals(String name, float similarity, int length, String queryString)
+ {
+ fieldName = name;
+ minSimilarity = similarity;
+ prefixLength = length;
+ this.queryString = queryString;
+ }
 @Override
 public int hashCode() {
@@ -174,7 +174,7 @@
 }
-
+
 }
 /**
@@ -186,77 +186,72 @@
 */
 public void addTerms(String queryString, String fieldName,float minSimilarity, int prefixLength)
 {
- fieldVals.add(new FieldVals(fieldName,minSimilarity,prefixLength,queryString));
+ fieldVals.add(new FieldVals(fieldName,minSimilarity,prefixLength,queryString));
 }
-
-
- private void addTerms(IndexReader reader,FieldVals f) throws IOException
- {
- if(f.queryString==null) return;
- TokenStream ts=analyzer.tokenStream(f.fieldName, new StringReader(f.queryString));
- CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
-
- int corpusNumDocs=reader.numDocs();
- HashSet<String> processedTerms=new HashSet<String>();
- ts.reset();
- while (ts.incrementToken())
- {
- String term = termAtt.toString();
- if(!processedTerms.contains(term))
- {
- processedTerms.add(term);
- ScoreTermQueue variantsQ=new ScoreTermQueue(MAX_VARIANTS_PER_TERM); //maxNum variants considered for any one term
- float minScore=0;
- Term startTerm=new Term(f.fieldName, term);
- AttributeSource atts = new AttributeSource();
- MaxNonCompetitiveBoostAttribute maxBoostAtt =
- atts.addAttribute(MaxNonCompetitiveBoostAttribute.class);
- SlowFuzzyTermsEnum fe = new SlowFuzzyTermsEnum(MultiFields.getTerms(reader, startTerm.field()), atts, startTerm, f.minSimilarity, f.prefixLength);
- //store the df so all variants use same idf
- int df = reader.docFreq(startTerm);
- int numVariants=0;
- int totalVariantDocFreqs=0;
- BytesRef possibleMatch;
- BoostAttribute boostAtt =
- fe.attributes().addAttribute(BoostAttribute.class);
- while ((possibleMatch = fe.next()) != null) {
- numVariants++;
- totalVariantDocFreqs+=fe.docFreq();
- float score=boostAtt.getBoost();
- if (variantsQ.size() < MAX_VARIANTS_PER_TERM || score > minScore){
- ScoreTerm st=new ScoreTerm(new Term(startTerm.field(), BytesRef.deepCopyOf(possibleMatch)),score,startTerm);
- variantsQ.insertWithOverflow(st);
- minScore = variantsQ.top().score; // maintain minScore
- }
- maxBoostAtt.setMaxNonCompetitiveBoost(variantsQ.size() >= MAX_VARIANTS_PER_TERM ? minScore : Float.NEGATIVE_INFINITY);
- }
- if(numVariants>0)
- {
- int avgDf=totalVariantDocFreqs/numVariants;
- if(df==0)//no direct match we can use as df for all variants
- {
- df=avgDf; //use avg df of all variants
- }
-
- // take the top variants (scored by edit distance) and reset the score
- // to include an IDF factor then add to the global queue for ranking
- // overall top query terms
- int size = variantsQ.size();
- for(int i = 0; i < size; i++)
- {
- ScoreTerm st = variantsQ.pop();
- st.score=(st.score*st.score)*sim.idf(df,corpusNumDocs);
- q.insertWithOverflow(st);
- }
- }
- }
+
+ private void addTerms(IndexReader reader, FieldVals f) throws IOException {
+ if (f.queryString == null) return;
+ TokenStream ts = analyzer.tokenStream(f.fieldName, new StringReader(f.queryString));
+ CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
+
+ int corpusNumDocs = reader.numDocs();
+ HashSet<String> processedTerms = new HashSet<String>();
+ ts.reset();
+ while (ts.incrementToken()) {
+ String term = termAtt.toString();
+ if (!processedTerms.contains(term)) {
+ processedTerms.add(term);
+ ScoreTermQueue variantsQ = new ScoreTermQueue(MAX_VARIANTS_PER_TERM); //maxNum variants considered for any one term
+ float minScore = 0;
+ Term startTerm = new Term(f.fieldName, term);
+ AttributeSource atts = new AttributeSource();
+ MaxNonCompetitiveBoostAttribute maxBoostAtt =
+ atts.addAttribute(MaxNonCompetitiveBoostAttribute.class);
+ SlowFuzzyTermsEnum fe = new SlowFuzzyTermsEnum(MultiFields.getTerms(reader, startTerm.field()), atts, startTerm, f.minSimilarity, f.prefixLength);
+ //store the df so all variants use same idf
+ int df = reader.docFreq(startTerm);
+ int numVariants = 0;
+ int totalVariantDocFreqs = 0;
+ BytesRef possibleMatch;
+ BoostAttribute boostAtt =
+ fe.attributes().addAttribute(BoostAttribute.class);
+ while ((possibleMatch = fe.next()) != null) {
+ numVariants++;
+ totalVariantDocFreqs += fe.docFreq();
+ float score = boostAtt.getBoost();
+ if (variantsQ.size() < MAX_VARIANTS_PER_TERM || score > minScore) {
+ ScoreTerm st = new ScoreTerm(new Term(startTerm.field(), BytesRef.deepCopyOf(possibleMatch)), score, startTerm);
+ variantsQ.insertWithOverflow(st);
+ minScore = variantsQ.top().score; // maintain minScore
+ }
+ maxBoostAtt.setMaxNonCompetitiveBoost(variantsQ.size() >= MAX_VARIANTS_PER_TERM ? minScore : Float.NEGATIVE_INFINITY);
 }
- ts.end();
- ts.close();
+
+ if (numVariants > 0) {
+ int avgDf = totalVariantDocFreqs / numVariants;
+ if (df == 0)//no direct match we can use as df for all variants
+ {
+ df = avgDf; //use avg df of all variants
+ }
+
+ // take the top variants (scored by edit distance) and reset the score
+ // to include an IDF factor then add to the global queue for ranking
+ // overall top query terms
+ int size = variantsQ.size();
+ for (int i = 0; i < size; i++) {
+ ScoreTerm st = variantsQ.pop();
+ st.score = (st.score * st.score) * sim.idf(df, corpusNumDocs);
+ q.insertWithOverflow(st);
+ }
+ }
+ }
 }
-
- @Override
+ ts.end();
+ ts.close();
+ }
+
+ @Override
 public Query rewrite(IndexReader reader) throws IOException
 {
 if(rewrittenQuery!=null)
@@ -264,12 +259,11 @@
 return rewrittenQuery;
 }
 //load up the list of possible terms
- for (Iterator<FieldVals> iter = fieldVals.iterator(); iter.hasNext();)
- {
- FieldVals f = iter.next();
- addTerms(reader,f);
- }
- //clear the list of fields
+ for (Iterator<FieldVals> iter = fieldVals.iterator(); iter.hasNext(); ) {
+ FieldVals f = iter.next();
+ addTerms(reader, f);
+ }
+ //clear the list of fields
 fieldVals.clear();
 BooleanQuery bq=new BooleanQuery();
@@ -368,15 +362,15 @@
 }
- public boolean isIgnoreTF()
- {
- return ignoreTF;
- }
+ public boolean isIgnoreTF()
+ {
+ return ignoreTF;
+ }
- public void setIgnoreTF(boolean ignoreTF)
- {
- this.ignoreTF = ignoreTF;
- }
+ public void setIgnoreTF(boolean ignoreTF)
+ {
+ this.ignoreTF = ignoreTF;
+ }
 }
Index: lucene/codecs/src/java/org/apache/lucene/codecs/FixedGapTermsIndexReader.java
===================================================================
--- lucene/codecs/src/java/org/apache/lucene/codecs/FixedGapTermsIndexReader.java (revision 1381159)
+++ lucene/codecs/src/java/org/apache/lucene/codecs/FixedGapTermsIndexReader.java (working copy)
@@ -169,7 +169,7 @@
 @Override
 public long seek(BytesRef target) {
- int lo = 0; // binary search
+ int lo = 0; // binary search
 int hi = fieldIndex.numIndexTerms - 1;
 assert totalIndexInterval > 0 : "totalIndexInterval=" + totalIndexInterval;
Index: lucene/test-framework/src/java/org/apache/lucene/analysis/MockTokenizer.java
===================================================================
--- lucene/test-framework/src/java/org/apache/lucene/analysis/MockTokenizer.java (revision 1381159)
+++ lucene/test-framework/src/java/org/apache/lucene/analysis/MockTokenizer.java (working copy)
@@ -160,7 +160,7 @@
 return Character.toCodePoint((char) ch, (char) ch2);
 } else {
 assert false : "stream ends with unpaired high surrogate: " + Integer.toHexString(ch);
- }
+ }
 }
 return ch;
 }
Index: lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
===================================================================
--- lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java (revision 1381159)
+++ lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java (working copy)
@@ -358,7 +358,7 @@
 } else {
 Logger.getLogger(LuceneTestCase.class.getSimpleName()).warning(
 "Property '" + SYSPROP_MAXFAILURES + "'=" + maxFailures + ", 'failfast' is" +
- " ignored.");
+ " ignored.");
 }
 }
Index: lucene/test-framework/src/java/org/apache/lucene/util/RunListenerPrintReproduceInfo.java
===================================================================
--- lucene/test-framework/src/java/org/apache/lucene/util/RunListenerPrintReproduceInfo.java (revision 1381159)
+++
lucene/test-framework/src/java/org/apache/lucene/util/RunListenerPrintReproduceInfo.java (working copy) @@ -129,7 +129,7 @@ private void reportAdditionalFailureInfo(final String testName) { if (TEST_LINE_DOCS_FILE.endsWith(JENKINS_LARGE_LINE_DOCS_FILE)) { System.err.println("NOTE: download the large Jenkins line-docs file by running " + - "'ant get-jenkins-line-docs' in the lucene directory."); + "'ant get-jenkins-line-docs' in the lucene directory."); } final StringBuilder b = new StringBuilder(); Index: lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/SimpleBoolFunction.java =================================================================== --- lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/SimpleBoolFunction.java (revision 1381159) +++ lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/SimpleBoolFunction.java (working copy) @@ -44,11 +44,11 @@ return new BoolDocValues(this) { @Override public boolean boolVal(int doc) { - return func(doc, vals); + return func(doc, vals); } @Override public String toString(int doc) { - return name() + '(' + vals.toString(doc) + ')'; + return name() + '(' + vals.toString(doc) + ')'; } }; } Index: lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/ScaleFloatFunction.java =================================================================== --- lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/ScaleFloatFunction.java (revision 1381159) +++ lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/ScaleFloatFunction.java (working copy) @@ -115,11 +115,11 @@ return new FloatDocValues(this) { @Override public float floatVal(int doc) { - return (vals.floatVal(doc) - minSource) * scale + min; + return (vals.floatVal(doc) - minSource) * scale + min; } @Override public String toString(int doc) { - return "scale(" + vals.toString(doc) + ",toMin=" + min + ",toMax=" + max + return "scale(" + vals.toString(doc) + ",toMin=" + min + ",toMax=" + max + ",fromMin=" + minSource + ",fromMax=" + maxSource + ")"; Index: lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/MultiBoolFunction.java =================================================================== --- lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/MultiBoolFunction.java (revision 1381159) +++ lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/MultiBoolFunction.java (working copy) @@ -50,7 +50,7 @@ return new BoolDocValues(this) { @Override public boolean boolVal(int doc) { - return func(doc, vals); + return func(doc, vals); } @Override Index: lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/DualFloatFunction.java =================================================================== --- lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/DualFloatFunction.java (revision 1381159) +++ lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/DualFloatFunction.java (working copy) @@ -54,11 +54,12 @@ return new FloatDocValues(this) { @Override public float floatVal(int doc) { - return func(doc, aVals, bVals); + return func(doc, aVals, bVals); } + @Override public String toString(int doc) { - return name() + '(' + aVals.toString(doc) + ',' + bVals.toString(doc) + ')'; + return name() + '(' + aVals.toString(doc) + ',' + bVals.toString(doc) + ')'; } }; } Index: lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/SimpleFloatFunction.java 
=================================================================== --- lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/SimpleFloatFunction.java (revision 1381159) +++ lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/SimpleFloatFunction.java (working copy) @@ -40,11 +40,11 @@ return new FloatDocValues(this) { @Override public float floatVal(int doc) { - return func(doc, vals); + return func(doc, vals); } @Override public String toString(int doc) { - return name() + '(' + vals.toString(doc) + ')'; + return name() + '(' + vals.toString(doc) + ')'; } }; }
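
For reference, here is a minimal sketch of how the two APIs reindented above are typically driven end to end. It is not part of the patch: the analyzer choice, field names, sample terms, fragment size and markup tags are illustrative assumptions, and error handling is omitted.

  import java.io.IOException;
  import java.io.StringReader;

  import org.apache.lucene.analysis.Analyzer;
  import org.apache.lucene.analysis.TokenStream;
  import org.apache.lucene.analysis.standard.StandardAnalyzer;
  import org.apache.lucene.sandbox.queries.FuzzyLikeThisQuery;
  import org.apache.lucene.search.IndexSearcher;
  import org.apache.lucene.search.Query;
  import org.apache.lucene.search.TopDocs;
  import org.apache.lucene.search.highlight.Highlighter;
  import org.apache.lucene.search.highlight.InvalidTokenOffsetsException;
  import org.apache.lucene.search.highlight.QueryScorer;
  import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
  import org.apache.lucene.search.highlight.SimpleSpanFragmenter;
  import org.apache.lucene.util.Version;

  public class PatchedApisSketch {

    // Analyzer choice is an assumption; any Analyzer matching the indexed field works.
    private static final Analyzer ANALYZER = new StandardAnalyzer(Version.LUCENE_40);

    // Highlight the top three fragments of one stored field value, joined by "...".
    static String highlight(Query query, String field, String text)
        throws IOException, InvalidTokenOffsetsException {
      QueryScorer scorer = new QueryScorer(query, field);
      Highlighter highlighter = new Highlighter(new SimpleHTMLFormatter("<b>", "</b>"), scorer);
      highlighter.setTextFragmenter(new SimpleSpanFragmenter(scorer, 100)); // ~100-char fragments
      TokenStream tokenStream = ANALYZER.tokenStream(field, new StringReader(text));
      // The separator overload shown in the Highlighter hunks concatenates the best fragments.
      return highlighter.getBestFragments(tokenStream, text, 3, "...");
    }

    // Find documents whose "name" field matches fuzzy variants of a misspelled term;
    // rewrite() expands the query via SlowFuzzyTermsEnum, as in the addTerms hunk above.
    static TopDocs fuzzyLikeThis(IndexSearcher searcher) throws IOException {
      FuzzyLikeThisQuery flt = new FuzzyLikeThisQuery(32, ANALYZER); // at most 32 expanded terms
      flt.addTerms("smiht", "name", 0.6f, 1); // minSimilarity=0.6, prefixLength=1
      return searcher.search(flt, 10);
    }
  }

The separator overload of getBestFragments() is a convenience over the String[] overload; callers that need per-fragment scores should call getBestTextFragments() directly, which is why that method is public.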