lucene-pylucene-commits mailing list archives

From: va...@apache.org
Subject: svn commit: r1787657 - in /lucene/pylucene/trunk: ./ python/ test/ test2/ test3/
Date: Sun, 19 Mar 2017 20:29:12 GMT
Author: vajda
Date: Sun Mar 19 20:29:12 2017
New Revision: 1787657

URL: http://svn.apache.org/viewvc?rev=1787657&view=rev
Log:
Added support for Python 3 (3.6+), tested only on Mac OS X so far.

Added:
    lucene/pylucene/trunk/test2/
      - copied from r1759171, lucene/pylucene/trunk/test/
    lucene/pylucene/trunk/test2/test_ICUFoldingFilter.py
      - copied unchanged from r1759972, lucene/pylucene/trunk/test/test_ICUFoldingFilter.py
    lucene/pylucene/trunk/test2/test_ICUNormalizer2Filter.py
      - copied unchanged from r1759972, lucene/pylucene/trunk/test/test_ICUNormalizer2Filter.py
    lucene/pylucene/trunk/test2/test_PositionIncrement.py
      - copied unchanged from r1759972, lucene/pylucene/trunk/test/test_PositionIncrement.py
    lucene/pylucene/trunk/test2/test_PyLucene.py
      - copied unchanged from r1781941, lucene/pylucene/trunk/test/test_PyLucene.py
    lucene/pylucene/trunk/test3/
      - copied from r1759171, lucene/pylucene/trunk/test/
    lucene/pylucene/trunk/test3/test_ICUFoldingFilter.py
      - copied, changed from r1759972, lucene/pylucene/trunk/test/test_ICUFoldingFilter.py
    lucene/pylucene/trunk/test3/test_ICUNormalizer2Filter.py
      - copied, changed from r1759972, lucene/pylucene/trunk/test/test_ICUNormalizer2Filter.py
    lucene/pylucene/trunk/test3/test_PositionIncrement.py
      - copied, changed from r1759972, lucene/pylucene/trunk/test/test_PositionIncrement.py
    lucene/pylucene/trunk/test3/test_PyLucene.py
      - copied, changed from r1781941, lucene/pylucene/trunk/test/test_PyLucene.py
Removed:
    lucene/pylucene/trunk/test/
Modified:
    lucene/pylucene/trunk/CHANGES
    lucene/pylucene/trunk/Makefile
    lucene/pylucene/trunk/python/collections.py
    lucene/pylucene/trunk/test3/BaseTokenStreamTestCase.py
    lucene/pylucene/trunk/test3/test_BinaryDocument.py
    lucene/pylucene/trunk/test3/test_BooleanQuery.py
    lucene/pylucene/trunk/test3/test_Collections.py
    lucene/pylucene/trunk/test3/test_DocBoost.py
    lucene/pylucene/trunk/test3/test_FuzzyQuery.py
    lucene/pylucene/trunk/test3/test_Highlighter.py
    lucene/pylucene/trunk/test3/test_ICUTransformFilter.py
    lucene/pylucene/trunk/test3/test_IndexDeletionPolicy.py
    lucene/pylucene/trunk/test3/test_PerFieldAnalyzerWrapper.py
    lucene/pylucene/trunk/test3/test_PyLuceneThread.py
    lucene/pylucene/trunk/test3/test_PythonDirectory.py
    lucene/pylucene/trunk/test3/test_PythonQueryParser.py
    lucene/pylucene/trunk/test3/test_RewriteQuery.py
    lucene/pylucene/trunk/test3/test_StopAnalyzer.py
    lucene/pylucene/trunk/test3/test_StopWords.py
    lucene/pylucene/trunk/test3/test_ThaiAnalyzer.py
    lucene/pylucene/trunk/test3/test_bug1564.py
    lucene/pylucene/trunk/test3/test_bug1842.py

Modified: lucene/pylucene/trunk/CHANGES
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/CHANGES?rev=1787657&r1=1787656&r2=1787657&view=diff
==============================================================================
--- lucene/pylucene/trunk/CHANGES (original)
+++ lucene/pylucene/trunk/CHANGES Sun Mar 19 20:29:12 2017
@@ -1,3 +1,9 @@
+Version 6.4.1 ->
+----------------------
+ - PyLucene built with JCC 3.0
+ - added support for Python 3.6
+ - 
+
 Version 6.2.0 -> 6.4.1
 ----------------------
  - using Lucene 6.4.1 sources

Modified: lucene/pylucene/trunk/Makefile
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/Makefile?rev=1787657&r1=1787656&r2=1787657&view=diff
==============================================================================
--- lucene/pylucene/trunk/Makefile (original)
+++ lucene/pylucene/trunk/Makefile Sun Mar 19 20:29:12 2017
@@ -43,8 +43,15 @@ LUCENE=$(LUCENE_SRC)/lucene
 # limit.
 #
 
+# Mac OS X 10.12 (64-bit Python 3.6, Java 1.8)
+#PREFIX_PYTHON=/Users/vajda/apache/pylucene/_install3
+#ANT=/Users/vajda/tmp/apache-ant-1.9.3/bin/ant
+#PYTHON=$(PREFIX_PYTHON)/bin/python
+#JCC=$(PYTHON) -m jcc.__main__ --shared --arch x86_64
+#NUM_FILES=8
+
 # Mac OS X 10.12 (64-bit Python 2.7, Java 1.8)
-#PREFIX_PYTHON=/Users/vajda/apache/pylucene/_install
+#PREFIX_PYTHON=/Users/vajda/apache/pylucene/_install2
 #ANT=/Users/vajda/tmp/apache-ant-1.9.3/bin/ant
 #PYTHON=$(PREFIX_PYTHON)/bin/python
 #JCC=$(PYTHON) -m jcc.__main__ --shared --arch x86_64
@@ -264,7 +271,7 @@ ICURES= $(LUCENE)/analysis/icu/src/resou
 RESOURCES=--resources $(ICURES)
 
 ifneq ($(PYTHON),)
-ENDIANNESS:=$(shell $(PYTHON) -c "import struct; print struct.pack('h', 1) == '\000\001' and 'b' or 'l'")
+ENDIANNESS:=$(shell $(PYTHON) -c "import struct; print(struct.pack('h', 1) == '\000\001' and 'b' or 'l')")
 endif
 
 resources: $(ICURES)/org/apache/lucene/analysis/icu/utr30.dat
@@ -357,16 +364,18 @@ ifeq ($(findstring CYGWIN,$(OS)),CYGWIN)
   BUILD_TEST:=`cygpath -aw $(BUILD_TEST)`
 else
   ifeq ($(findstring MINGW,$(OS)),MINGW)
-    BUILD_TEST:=`$(PYTHON) -c "import os, sys; print os.path.normpath(sys.argv[1]).replace(chr(92), chr(92)*2)" $(BUILD_TEST)`
+    BUILD_TEST:=`$(PYTHON) -c "import os, sys; print(os.path.normpath(sys.argv[1]).replace(chr(92), chr(92)*2))" $(BUILD_TEST)`
   endif
 endif
 
+TEST_DIR:=`$(PYTHON) -c "import sys; print('test%s' %(sys.version_info[0]))"`
+
 install-test:
 	mkdir -p $(BUILD_TEST)
 	PYTHONPATH=$(BUILD_TEST) $(GENERATE) --install $(DEBUG_OPT) --install-dir $(BUILD_TEST)
 
 test: install-test
-	find test -name 'test_*.py' | PYTHONPATH=$(BUILD_TEST) xargs -t -n 1 $(PYTHON)
+	find $(TEST_DIR) -name 'test_*.py' | PYTHONPATH=$(BUILD_TEST) xargs -t -n 1 $(PYTHON)
 
 ARCHIVE=pylucene-$(VERSION)-src.tar.gz
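
The new TEST_DIR variable makes the test target version-aware: it asks the
active $(PYTHON) for its major version and then runs the suite that lives in
test2/ (Python 2) or test3/ (Python 3). A minimal sketch of the same dispatch
in plain Python; only the directory names come from this commit, the rest is
illustrative:

    import os
    import sys

    # 'test2' under Python 2, 'test3' under Python 3 -- the expression the
    # Makefile evaluates to compute $(TEST_DIR).
    test_dir = 'test%s' % (sys.version_info[0],)

    # The test target then feeds every test_*.py below that directory to
    # $(PYTHON); a rough stand-in for the find | xargs pipeline:
    for name in sorted(os.listdir(test_dir)):
        if name.startswith('test_') and name.endswith('.py'):
            print(os.path.join(test_dir, name))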
 

Modified: lucene/pylucene/trunk/python/collections.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/python/collections.py?rev=1787657&r1=1787656&r2=1787657&view=diff
==============================================================================
--- lucene/pylucene/trunk/python/collections.py (original)
+++ lucene/pylucene/trunk/python/collections.py Sun Mar 19 20:29:12 2017
@@ -10,6 +10,7 @@
 #   See the License for the specific language governing permissions and
 #   limitations under the License.
 
+import sys
 from lucene import JArray
 
 from java.lang import IllegalStateException, IndexOutOfBoundsException
@@ -18,6 +19,14 @@ from org.apache.pylucene.util import \
     PythonSet, PythonList, PythonIterator, PythonListIterator
 
 
+if sys.version_info[0] > 2:
+    def next(iterator):
+        return iterator.__next__();
+else:
+    def next(iterator):
+        return iterator.next()
+
+
 class JavaSet(PythonSet):
     """
     This class implements java.util.Set around a Python set instance it wraps.
@@ -76,7 +85,7 @@ class JavaSet(PythonSet):
                 if hasattr(_self, '_next'):
                     return True
                 try:
-                    _self._next = _self._iterator.next()
+                    _self._next = next(_self._iterator)
                     return True
                 except StopIteration:
                     return False
@@ -85,7 +94,7 @@ class JavaSet(PythonSet):
                     next = _self._next
                     del _self._next
                 else:
-                    next = _self._iterator.next()
+                    next = next(_self._iterator)
                 return next
         return _iterator()
 
@@ -134,7 +143,7 @@ class JavaListIterator(PythonListIterato
 
     def next(self):
         if self.index >= len(self._lst):
-            raise JavaError, NoSuchElementException(str(self.index))
+            raise JavaError(NoSuchElementException(str(self.index)))
         result = self._lst[self.index]
         self._lastIndex = self.index
         self.index += 1
@@ -142,7 +151,7 @@ class JavaListIterator(PythonListIterato
 
     def previous(self):
         if self.index <= 0:
-            raise JavaError, NoSuchElementException(str(self.index - 1))
+            raise JavaError(NoSuchElementException(str(self.index - 1)))
         self.index -= 1
         self._lastIndex = self.index
         return self._lst[self.index]
@@ -167,7 +176,7 @@ class JavaListIterator(PythonListIterato
         element that would be returned by previous, if any.
         """
         if self._lastIndex < 0:
-            raise JavaError, IllegalStateException("add")
+            raise JavaError(IllegalStateException("add"))
         self._lst.insert(self.index, element)
         self.index += 1
         self._lastIndex = -1 # invalidate state
@@ -178,7 +187,7 @@ class JavaListIterator(PythonListIterato
         was returned by next or previous.
         """
         if self._lastIndex < 0:
-            raise JavaError, IllegalStateException("remove")
+            raise JavaError(IllegalStateException("remove"))
         del self._lst[self._lastIndex]
         self._lastIndex = -1 # invalidate state
 
@@ -188,7 +197,7 @@ class JavaListIterator(PythonListIterato
         with the specified element.
         """
         if self._lastIndex < 0:
-            raise JavaError, IllegalStateException("set")
+            raise JavaError(IllegalStateException("set"))
         self._lst[self._lastIndex] = element
 
     def __iter__(self):
@@ -245,7 +254,7 @@ class JavaList(PythonList):
 
     def get(self, index):
         if index < 0 or index >= self.size():
-            raise JavaError, IndexOutOfBoundsException(str(index))
+            raise JavaError(IndexOutOfBoundsException(str(index)))
         return self._lst[index]
 
     def indexOf(self, obj):
@@ -266,7 +275,7 @@ class JavaList(PythonList):
                 if hasattr(_self, '_next'):
                     return True
                 try:
-                    _self._next = _self._iterator.next()
+                    _self._next = next(_self._iterator)
                     return True
                 except StopIteration:
                     return False
@@ -275,7 +284,7 @@ class JavaList(PythonList):
                     next = _self._next
                     del _self._next
                 else:
-                    next = _self._iterator.next()
+                    next = next(_self._iterator)
                 return next
         return _iterator()
 
@@ -352,5 +361,5 @@ class JavaList(PythonList):
 
     def set(self, index, obj):
         if index < 0 or index >= self.size():
-            raise JavaError, IndexOutOfBoundsException(str(index))
+            raise JavaError(IndexOutOfBoundsException(str(index)))
         self._lst[index] = obj
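
Most of the remaining collections.py edits replace the Python-2-only
"raise Class, instance" statement with the call form, which both versions
accept. A tiny sketch of the pattern with a stand-in exception (JavaError and
the java.lang exception classes come from PyLucene/JCC at runtime and are not
reproduced here):

    class JavaError(Exception):
        """Illustrative stand-in for lucene.JavaError."""

    def get(lst, index):
        if index < 0 or index >= len(lst):
            # Valid on Python 2 and 3; the old 'raise JavaError, ...' form
            # is a SyntaxError under Python 3.
            raise JavaError(IndexError(str(index)))
        return lst[index]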

Modified: lucene/pylucene/trunk/test3/BaseTokenStreamTestCase.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test3/BaseTokenStreamTestCase.py?rev=1787657&r1=1759171&r2=1787657&view=diff
==============================================================================
--- lucene/pylucene/trunk/test3/BaseTokenStreamTestCase.py (original)
+++ lucene/pylucene/trunk/test3/BaseTokenStreamTestCase.py Sun Mar 19 20:29:12 2017
@@ -62,8 +62,8 @@ class BaseTokenStreamTestCase(TestCase):
 
         #checkClearAtt = ts.addAttribute(PythonAttribute.class_);
 
-        self.assert_(output is not None)
-        self.assert_(ts.hasAttribute(CharTermAttribute.class_),
+        self.assertTrue(output is not None)
+        self.assertTrue(ts.hasAttribute(CharTermAttribute.class_),
                                      "has no CharTermAttribute")
 
         termAtt = ts.getAttribute(CharTermAttribute.class_)
@@ -72,24 +72,24 @@ class BaseTokenStreamTestCase(TestCase):
         if (startOffsets is not None or
             endOffsets is not None or
             finalOffset is not None):
-            self.assert_(ts.hasAttribute(OffsetAttribute.class_),
+            self.assertTrue(ts.hasAttribute(OffsetAttribute.class_),
                                          "has no OffsetAttribute")
             offsetAtt = ts.getAttribute(OffsetAttribute.class_)
 
         typeAtt = None
         if types is not None:
-            self.assert_(ts.hasAttribute(TypeAttribute.class_),
+            self.assertTrue(ts.hasAttribute(TypeAttribute.class_),
                          "has no TypeAttribute")
             typeAtt = ts.getAttribute(TypeAttribute.class_)
 
         posIncrAtt = None
         if posIncrements is not None:
-            self.assert_(ts.hasAttribute(PositionIncrementAttribute.class_),
+            self.assertTrue(ts.hasAttribute(PositionIncrementAttribute.class_),
                          "has no PositionIncrementAttribute")
             posIncrAtt = ts.getAttribute(PositionIncrementAttribute.class_)
 
         ts.reset()
-        for i in xrange(len(output)):
+        for i in range(len(output)):
             # extra safety to enforce, that the state is not preserved and
             # also assign bogus values
             ts.clearAttributes()
@@ -101,7 +101,7 @@ class BaseTokenStreamTestCase(TestCase):
             if posIncrAtt is not None:
                 posIncrAtt.setPositionIncrement(45987657)
 
-            self.assert_(ts.incrementToken(), "token %d exists" %(i))
+            self.assertTrue(ts.incrementToken(), "token %d exists" %(i))
             self.assertEqual(output[i], termAtt.toString(), "term %d" %(i))
             if startOffsets is not None:
                 self.assertEqual(startOffsets[i], offsetAtt.startOffset(),
@@ -116,7 +116,7 @@ class BaseTokenStreamTestCase(TestCase):
                                  posIncrAtt.getPositionIncrement(),
                                  "posIncrement %d" %(i))
 
-        self.assert_(not ts.incrementToken(), "end of stream")
+        self.assertTrue(not ts.incrementToken(), "end of stream")
         ts.end()
         ts.close()
 

Modified: lucene/pylucene/trunk/test3/test_BinaryDocument.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test3/test_BinaryDocument.py?rev=1787657&r1=1759171&r2=1787657&view=diff
==============================================================================
--- lucene/pylucene/trunk/test3/test_BinaryDocument.py (original)
+++ lucene/pylucene/trunk/test3/test_BinaryDocument.py Sun Mar 19 20:29:12 2017
@@ -25,8 +25,8 @@ from org.apache.lucene.util import Versi
 
 class TestBinaryDocument(PyLuceneTestCase):
 
-    binaryValStored = "this text will be stored as a byte array in the index"
-    binaryValCompressed = "this text will be also stored and compressed as a byte array in the index"
+    binaryValStored = b"this text will be stored as a byte array in the index"
+    binaryValCompressed = b"this text will be also stored and compressed as a byte array in the index"
 
     def testBinaryFieldInIndex(self):
 
@@ -52,18 +52,18 @@ class TestBinaryDocument(PyLuceneTestCas
         # open a reader and fetch the document
         reader = self.getReader()
         docFromReader = reader.document(0)
-        self.assert_(docFromReader is not None)
+        self.assertTrue(docFromReader is not None)
 
         # fetch the binary stored field and compare it's content with the
         # original one
         bytes = docFromReader.getBinaryValue("binaryStored")
-        binaryFldStoredTest = bytes.bytes.string_
+        binaryFldStoredTest = bytes.bytes.bytes_
         self.assertEqual(binaryFldStoredTest, self.binaryValStored)
 
         # fetch the string field and compare it's content with the original
         # one
         stringFldStoredTest = docFromReader.get("stringStored")
-        self.assertEqual(stringFldStoredTest, self.binaryValStored)
+        self.assertEqual(stringFldStoredTest, self.binaryValStored.decode())
 
         reader.close()
 
@@ -85,14 +85,14 @@ class TestBinaryDocument(PyLuceneTestCas
         # open a reader and fetch the document
         reader = self.getReader()
         docFromReader = reader.document(0)
-        self.assert_(docFromReader is not None)
+        self.assertTrue(docFromReader is not None)
 
         # fetch the binary compressed field and compare it's content with
         # the original one
         bytes = CompressionTools.decompress(docFromReader.getBinaryValue("binaryCompressed"))
-        binaryFldCompressedTest = bytes.string_
+        binaryFldCompressedTest = bytes.bytes_
         self.assertEqual(binaryFldCompressedTest, self.binaryValCompressed)
-        self.assertEqual(CompressionTools.decompressString(docFromReader.getBinaryValue("stringCompressed")), self.binaryValCompressed)
+        self.assertEqual(CompressionTools.decompressString(docFromReader.getBinaryValue("stringCompressed")), self.binaryValCompressed.decode())
 
         reader.close()
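
Under Python 3 the stored binary values have to be bytes while string fields
stay str, so the fixtures become b"" literals and the string-field assertions
decode before comparing. The round-trip with the Lucene plumbing stripped away
(the value is the test's own fixture):

    binaryValStored = b"this text will be stored as a byte array in the index"

    # Binary fields come back as bytes and compare directly ...
    assert binaryValStored == b"this text will be stored as a byte array in the index"

    # ... while string fields come back as str, so decode first
    # (UTF-8 by default).
    assert binaryValStored.decode() == (
        "this text will be stored as a byte array in the index")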
 

Modified: lucene/pylucene/trunk/test3/test_BooleanQuery.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test3/test_BooleanQuery.py?rev=1787657&r1=1759171&r2=1787657&view=diff
==============================================================================
--- lucene/pylucene/trunk/test3/test_BooleanQuery.py (original)
+++ lucene/pylucene/trunk/test3/test_BooleanQuery.py Sun Mar 19 20:29:12 2017
@@ -49,7 +49,7 @@ class TestBooleanQuery(PyLuceneTestCase)
         b2.add(nested2, BooleanClause.Occur.SHOULD)
         bq2 = b2.build()
 
-        self.assert_(bq1.equals(bq2))
+        self.assertTrue(bq1.equals(bq2))
 
 
 if __name__ == "__main__":

Modified: lucene/pylucene/trunk/test3/test_Collections.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test3/test_Collections.py?rev=1787657&r1=1759171&r2=1787657&view=diff
==============================================================================
--- lucene/pylucene/trunk/test3/test_Collections.py (original)
+++ lucene/pylucene/trunk/test3/test_Collections.py Sun Mar 19 20:29:12 2017
@@ -131,7 +131,7 @@ class Test_CollectionsListBase(unittest.
     def createTestList(self):
         """creates the test list for this test case
         """
-        return range(9)
+        return list(range(9))
 
     def setUp(self):
         self.testList = self.createTestList()
@@ -236,7 +236,7 @@ class Test_CollectionsListBase(unittest.
 class Test_CollectionsStringList(Test_CollectionsListBase):
 
     def createTestList(self):
-        return [u'a', u'b', u'c']
+        return ['a', 'b', 'c']
 
 
 class Test_CollectionsFloatList(Test_CollectionsListBase):

Modified: lucene/pylucene/trunk/test3/test_DocBoost.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test3/test_DocBoost.py?rev=1787657&r1=1759171&r2=1787657&view=diff
==============================================================================
--- lucene/pylucene/trunk/test3/test_DocBoost.py (original)
+++ lucene/pylucene/trunk/test3/test_DocBoost.py Sun Mar 19 20:29:12 2017
@@ -64,7 +64,7 @@ class DocBoostTestCase(PyLuceneTestCase)
 
         lastScore = 0.0
         for score in scores:
-            self.assert_(score > lastScore)
+            self.assertTrue(score > lastScore)
             lastScore = score
 
 

Modified: lucene/pylucene/trunk/test3/test_FuzzyQuery.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test3/test_FuzzyQuery.py?rev=1787657&r1=1759171&r2=1787657&view=diff
==============================================================================
--- lucene/pylucene/trunk/test3/test_FuzzyQuery.py (original)
+++ lucene/pylucene/trunk/test3/test_FuzzyQuery.py Sun Mar 19 20:29:12 2017
@@ -13,7 +13,7 @@
 # ====================================================================
 
 import sys, lucene, unittest
-from itertools import izip
+
 from lucene import JavaError
 from PyLuceneTestCase import PyLuceneTestCase
 
@@ -86,7 +86,7 @@ class FuzzyQueryTestCase(PyLuceneTestCas
         self.assertEqual(3, len(hits), "3 documents should match")
 
         order = ("bbbbb", "abbbb", "aabbb")
-        for hit, o in izip(hits, order):
+        for hit, o in zip(hits, order):
             term = searcher.doc(hit.doc).get("field")
             self.assertEqual(o, term)
 
@@ -98,7 +98,7 @@ class FuzzyQueryTestCase(PyLuceneTestCas
         hits = searcher.search(query, 1000).scoreDocs
         self.assertEqual(2, len(hits), "only 2 documents should match");
         order = ("bbbbb","abbbb")
-        for hit, o in izip(hits, order):
+        for hit, o in zip(hits, order):
             term = searcher.doc(hit.doc).get("field")
             self.assertEqual(o, term)
 
@@ -346,7 +346,7 @@ class FuzzyQueryTestCase(PyLuceneTestCas
         try:
             q = FuzzyQuery(Term("field", "t"), 3)
             self.fail()
-        except JavaError, e:
+        except JavaError as e:
             #expected
             pass
 

Modified: lucene/pylucene/trunk/test3/test_Highlighter.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test3/test_Highlighter.py?rev=1787657&r1=1759171&r2=1787657&view=diff
==============================================================================
--- lucene/pylucene/trunk/test3/test_Highlighter.py (original)
+++ lucene/pylucene/trunk/test3/test_Highlighter.py Sun Mar 19 20:29:12 2017
@@ -86,7 +86,7 @@ class HighlighterTestCase(PyLuceneTestCa
             result = highlighter.getBestFragments(tokenStream, text,
                                                   maxNumFragmentsRequired,
                                                   "...")
-            print "\t", result
+            print("\t", result)
 
         # Not sure we can assert anything here - just running to check we don't
         # throw any exceptions
@@ -95,7 +95,7 @@ class HighlighterTestCase(PyLuceneTestCa
 
         self.doSearching("Wicked")
         self.doStandardHighlights()
-        self.assert_(self.numHighlights == 3,
+        self.assertTrue(self.numHighlights == 3,
                      ("Failed to find correct number of highlights, %d found"
                       %(self.numHighlights)))
 
@@ -107,7 +107,7 @@ class HighlighterTestCase(PyLuceneTestCa
         # fuzzy etc) you must use a rewritten query!
         self.query = self.query.rewrite(self.reader)
 
-        print "Searching for:", self.query.toString(self.FIELD_NAME)
+        print("Searching for:", self.query.toString(self.FIELD_NAME))
         self.scoreDocs = self.searcher.search(self.query, 100).scoreDocs
         self.numHighlights = 0
 
@@ -128,7 +128,7 @@ class HighlighterTestCase(PyLuceneTestCa
                                                   text,
                                                   maxNumFragmentsRequired,
                                                   fragmentSeparator)
-            print "\t", result
+            print("\t", result)
 
     def countHighlightTerm(self):
 

Copied: lucene/pylucene/trunk/test3/test_ICUFoldingFilter.py (from r1759972, lucene/pylucene/trunk/test/test_ICUFoldingFilter.py)
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test3/test_ICUFoldingFilter.py?p2=lucene/pylucene/trunk/test3/test_ICUFoldingFilter.py&p1=lucene/pylucene/trunk/test/test_ICUFoldingFilter.py&r1=1759972&r2=1787657&rev=1787657&view=diff
==============================================================================
--- lucene/pylucene/trunk/test/test_ICUFoldingFilter.py (original)
+++ lucene/pylucene/trunk/test3/test_ICUFoldingFilter.py Sun Mar 19 20:29:12 2017
@@ -18,7 +18,7 @@
 
 try:
     from icu import Normalizer2, UNormalizationMode2
-except ImportError, e:
+except ImportError as e:
     pass
 
 import sys, lucene, unittest
@@ -49,32 +49,32 @@ class TestICUFoldingFilter(BaseTokenStre
                                [ "this", "is", "a", "test" ])
 
         # case folding
-        self._assertAnalyzesTo(a, u"Ruß", [ "russ" ])
+        self._assertAnalyzesTo(a, "Ruß", [ "russ" ])
 
         # case folding with accent removal
-        self._assertAnalyzesTo(a, u"ΜΆΪΟΣ", [ u"μαιοσ" ])
-        self._assertAnalyzesTo(a, u"Μάϊος", [ u"μαιοσ" ])
+        self._assertAnalyzesTo(a, "ΜΆΪΟΣ", [ "μαιοσ" ])
+        self._assertAnalyzesTo(a, "Μάϊος", [ "μαιοσ" ])
 
         # supplementary case folding
-        self._assertAnalyzesTo(a, u"𐐖", [ u"𐐾" ])
+        self._assertAnalyzesTo(a, "𐐖", [ "𐐾" ])
 
         # normalization
-        self._assertAnalyzesTo(a, u"ﴳﴺﰧ", [ u"طمطمطم" ])
+        self._assertAnalyzesTo(a, "ﴳﴺﰧ", [ "طمطمطم" ])
 
         # removal of default ignorables
-        self._assertAnalyzesTo(a, u"क्‍ष", [ u"कष" ])
+        self._assertAnalyzesTo(a, "क्‍ष", [ "कष" ])
 
         # removal of latin accents (composed)
-        self._assertAnalyzesTo(a, u"résumé", [ "resume" ])
+        self._assertAnalyzesTo(a, "résumé", [ "resume" ])
 
         # removal of latin accents (decomposed)
-        self._assertAnalyzesTo(a, u"re\u0301sume\u0301", [ u"resume" ])
+        self._assertAnalyzesTo(a, "re\u0301sume\u0301", [ "resume" ])
 
         # fold native digits
-        self._assertAnalyzesTo(a, u"৭০৬", [ "706" ])
+        self._assertAnalyzesTo(a, "৭০৬", [ "706" ])
 
         # ascii-folding-filter type stuff
-        self._assertAnalyzesTo(a, u"đis is cræzy", [ "dis", "is", "craezy" ])
+        self._assertAnalyzesTo(a, "đis is cræzy", [ "dis", "is", "craezy" ])
 
 
 if __name__ == "__main__":
@@ -95,4 +95,4 @@ if __name__ == "__main__":
             else:
                  unittest.main()
         else:
-            print >>sys.stderr, "ICU version >= 49 is required, running:", icu.ICU_VERSION
+            print("ICU version >= 49 is required, running:", icu.ICU_VERSION, file=sys.stderr)

Copied: lucene/pylucene/trunk/test3/test_ICUNormalizer2Filter.py (from r1759972, lucene/pylucene/trunk/test/test_ICUNormalizer2Filter.py)
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test3/test_ICUNormalizer2Filter.py?p2=lucene/pylucene/trunk/test3/test_ICUNormalizer2Filter.py&p1=lucene/pylucene/trunk/test/test_ICUNormalizer2Filter.py&r1=1759972&r2=1787657&rev=1787657&view=diff
==============================================================================
--- lucene/pylucene/trunk/test/test_ICUNormalizer2Filter.py (original)
+++ lucene/pylucene/trunk/test3/test_ICUNormalizer2Filter.py Sun Mar 19 20:29:12 2017
@@ -18,7 +18,7 @@
 
 try:
     from icu import Normalizer2, UNormalizationMode2
-except ImportError, e:
+except ImportError as e:
     pass
 
 import sys, lucene, unittest
@@ -52,17 +52,17 @@ class TestICUNormalizer2Filter(BaseToken
         self._assertAnalyzesTo(a, "Ruß", [ "russ" ])
 
         # case folding
-        self._assertAnalyzesTo(a, u"ΜΆΪΟΣ", [ u"μάϊοσ" ])
-        self._assertAnalyzesTo(a, u"Μάϊος", [ u"μάϊοσ" ])
+        self._assertAnalyzesTo(a, "ΜΆΪΟΣ", [ "μάϊοσ" ])
+        self._assertAnalyzesTo(a, "Μάϊος", [ "μάϊοσ" ])
 
         # supplementary case folding
-        self._assertAnalyzesTo(a, u"𐐖", [ u"𐐾" ])
+        self._assertAnalyzesTo(a, "𐐖", [ "𐐾" ])
 
         # normalization
-        self._assertAnalyzesTo(a, u"ﴳﴺﰧ", [ u"طمطمطم" ])
+        self._assertAnalyzesTo(a, "ﴳﴺﰧ", [ "طمطمطم" ])
 
         # removal of default ignorables
-        self._assertAnalyzesTo(a, u"क्‍ष", [ u"क्ष" ])
+        self._assertAnalyzesTo(a, "क्‍ष", [ "क्ष" ])
 
     def testAlternate(self):
 
@@ -81,7 +81,7 @@ class TestICUNormalizer2Filter(BaseToken
 
         a = analyzer()
         # decompose EAcute into E + combining Acute
-        self._assertAnalyzesTo(a, u"\u00E9", [ u"\u0065\u0301" ])
+        self._assertAnalyzesTo(a, "\u00E9", [ "\u0065\u0301" ])
 
 
 if __name__ == "__main__":

Modified: lucene/pylucene/trunk/test3/test_ICUTransformFilter.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test3/test_ICUTransformFilter.py?rev=1787657&r1=1759171&r2=1787657&view=diff
==============================================================================
--- lucene/pylucene/trunk/test3/test_ICUTransformFilter.py (original)
+++ lucene/pylucene/trunk/test3/test_ICUTransformFilter.py Sun Mar 19 20:29:12 2017
@@ -18,7 +18,7 @@
 
 try:
     from icu import Transliterator, UTransDirection
-except ImportError, e:
+except ImportError as e:
     pass
 
 import sys, lucene, unittest
@@ -47,17 +47,17 @@ class TestICUTransformFilter(BaseTokenSt
     def testBasicFunctionality(self):
 
         self._checkToken(self._getTransliterator("Traditional-Simplified"),
-                         u"簡化字", u"简化字")
+                         "簡化字", "简化字")
         self._checkToken(self._getTransliterator("Katakana-Hiragana"),
-                         u"ヒラガナ", u"ひらがな")
+                         "ヒラガナ", "ひらがな")
         self._checkToken(self._getTransliterator("Fullwidth-Halfwidth"),
-                         u"アルアノリウ", u"アルアノリウ")
+                         "アルアノリウ", "アルアノリウ")
         self._checkToken(self._getTransliterator("Any-Latin"),
-                         u"Αλφαβητικός Κατάλογος", u"Alphabētikós Katálogos")
+                         "Αλφαβητικός Κατάλογος", "Alphabētikós Katálogos")
         self._checkToken(self._getTransliterator("NFD; [:Nonspacing Mark:] Remove"),
-                         u"Alphabētikós Katálogos", u"Alphabetikos Katalogos")
+                         "Alphabētikós Katálogos", "Alphabetikos Katalogos")
         self._checkToken(self._getTransliterator("Han-Latin"),
-                         u"中国", u"zhōng guó")
+                         "中国", "zhōng guó")
 
     def testCustomFunctionality(self):
 

Modified: lucene/pylucene/trunk/test3/test_IndexDeletionPolicy.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test3/test_IndexDeletionPolicy.py?rev=1787657&r1=1759171&r2=1787657&view=diff
==============================================================================
--- lucene/pylucene/trunk/test3/test_IndexDeletionPolicy.py (original)
+++ lucene/pylucene/trunk/test3/test_IndexDeletionPolicy.py Sun Mar 19 20:29:12 2017
@@ -60,14 +60,14 @@ class IndexDeletionPolicyTestCase(PyLuce
         self.assertTrue(self.policy.onCommitCalled)
 
         # external IR sees 1 commit:
-        self.assertEquals(1, DirectoryReader.listCommits(self.directory).size())
+        self.assertEqual(1, DirectoryReader.listCommits(self.directory).size())
 
         # commit again:
         writer.addDocument(doc)
         writer.commit()
 
         # external IR sees 2 commits:
-        self.assertEquals(2, DirectoryReader.listCommits(self.directory).size())
+        self.assertEqual(2, DirectoryReader.listCommits(self.directory).size())
 
         writer.close()
 
@@ -76,11 +76,11 @@ class IndexDeletionPolicyTestCase(PyLuce
 
         self.assertTrue(self.policy.onInitCalled)
         self.assertFalse(self.policy.onCommitCalled)
-        self.assertEquals(2, DirectoryReader.listCommits(self.directory).size())
+        self.assertEqual(2, DirectoryReader.listCommits(self.directory).size())
         writer.close()
 
         # 3 from closing writer again
-        self.assertEquals(3, DirectoryReader.listCommits(self.directory).size())
+        self.assertEqual(3, DirectoryReader.listCommits(self.directory).size())
 
 if __name__ == "__main__":
     lucene.initVM()

Modified: lucene/pylucene/trunk/test3/test_PerFieldAnalyzerWrapper.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test3/test_PerFieldAnalyzerWrapper.py?rev=1787657&r1=1759171&r2=1787657&view=diff
==============================================================================
--- lucene/pylucene/trunk/test3/test_PerFieldAnalyzerWrapper.py (original)
+++ lucene/pylucene/trunk/test3/test_PerFieldAnalyzerWrapper.py Sun Mar 19 20:29:12 2017
@@ -38,14 +38,14 @@ class PerFieldAnalyzerTestCase(PyLuceneT
         tokenStream.reset()
         termAtt = tokenStream.getAttribute(CharTermAttribute.class_)
 
-        self.assert_(tokenStream.incrementToken())
+        self.assertTrue(tokenStream.incrementToken())
         self.assertEqual("Qwerty", termAtt.toString(),
                          "WhitespaceAnalyzer does not lowercase")
 
         tokenStream = analyzer.tokenStream("special", StringReader(text))
         tokenStream.reset()
         termAtt = tokenStream.getAttribute(CharTermAttribute.class_)
-        self.assert_(tokenStream.incrementToken())
+        self.assertTrue(tokenStream.incrementToken())
         self.assertEqual("qwerty", termAtt.toString(),
                          "SimpleAnalyzer lowercases")
 

Copied: lucene/pylucene/trunk/test3/test_PositionIncrement.py (from r1759972, lucene/pylucene/trunk/test/test_PositionIncrement.py)
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test3/test_PositionIncrement.py?p2=lucene/pylucene/trunk/test3/test_PositionIncrement.py&p1=lucene/pylucene/trunk/test/test_PositionIncrement.py&r1=1759972&r2=1787657&rev=1787657&view=diff
==============================================================================
--- lucene/pylucene/trunk/test/test_PositionIncrement.py (original)
+++ lucene/pylucene/trunk/test3/test_PositionIncrement.py Sun Mar 19 20:29:12 2017
@@ -180,7 +180,6 @@ class PositionIncrementTestCase(PyLucene
     def testPayloadsPos0(self):
 
         writer = self.getWriter(analyzer=TestPayloadAnalyzer())
-
         doc = Document()
         doc.add(Field("content", "a a b c d e a f g h i j a b k k",
                       TextField.TYPE_STORED))
@@ -191,7 +190,7 @@ class PositionIncrementTestCase(PyLucene
         tp = reader.postings(Term("content", "a"), PostingsEnum.ALL)
 
         count = 0
-        self.assert_(tp.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+        self.assertTrue(tp.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
 
         # "a" occurs 4 times
         self.assertEqual(4, tp.freq())
@@ -225,8 +224,8 @@ class PositionIncrementTestCase(PyLucene
                 for payload in collector.payloads:
                     count += 1
 
-        self.assert_(sawZero)
-        self.assertEquals(8, count)
+        self.assertTrue(sawZero)
+        self.assertEqual(8, count)
 
         spans = snq.createWeight(searcher, False).getSpans(
             searcher.getIndexReader().leaves().get(0),
@@ -239,8 +238,8 @@ class PositionIncrementTestCase(PyLucene
                 count += 1
                 sawZero = sawZero or spans.startPosition() == 0
 
-        self.assertEquals(4, count)
-        self.assert_(sawZero)
+        self.assertEqual(4, count)
+        self.assertTrue(sawZero)
 
 
 class PayloadSpanCollector(PythonSpanCollector):
@@ -282,7 +281,7 @@ class PayloadFilter(PythonTokenFilter):
     def incrementToken(self):
 
         if self.input.incrementToken():
-            bytes = JArray('byte')("pos: %d" %(self.pos))
+            bytes = JArray('byte')(b"pos: %d" %(self.pos))
             self.payloadAttr.setPayload(BytesRef(bytes))
 
             if self.pos == 0 or self.i % 2 == 1:
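
The payload is now formatted directly into a bytes literal; %-formatting on
bytes requires Python 3.5 or newer, which is consistent with the 3.6+ floor in
the log message. The formatting step in isolation:

    pos = 7
    payload = b"pos: %d" % (pos,)
    assert payload == b"pos: 7"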

Copied: lucene/pylucene/trunk/test3/test_PyLucene.py (from r1781941, lucene/pylucene/trunk/test/test_PyLucene.py)
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test3/test_PyLucene.py?p2=lucene/pylucene/trunk/test3/test_PyLucene.py&p1=lucene/pylucene/trunk/test/test_PyLucene.py&r1=1781941&r2=1787657&rev=1787657&view=diff
==============================================================================
--- lucene/pylucene/trunk/test/test_PyLucene.py (original)
+++ lucene/pylucene/trunk/test3/test_PyLucene.py Sun Mar 19 20:29:12 2017
@@ -136,7 +136,7 @@ class Test_PyLuceneBase(object):
             # using a unicode body cause problems, which seems very odd
             # since the python type is the same regardless affter doing
             # the encode
-            body_text = u"hello world"*20
+            body_text = "hello world"*20
             body_reader = StringReader(body_text)
             doc.add(Field("content", body_reader, TextField.TYPE_NOT_STORED))
 
@@ -173,7 +173,7 @@ class Test_PyLuceneBase(object):
                                                 [SHOULD, SHOULD],
                                                 self.getAnalyzer())
             topDocs = searcher.search(query, 50)
-            self.assertEquals(1, topDocs.totalHits)
+            self.assertEqual(1, topDocs.totalHits)
         finally:
             self.closeStore(store)
         
@@ -285,16 +285,16 @@ class Test_PyLuceneBase(object):
             reader = DirectoryReader.open(store)
             fieldInfos = MultiFields.getMergedFieldInfos(reader)
             for fieldInfo in fieldInfos.iterator():
-                self.assert_(fieldInfo.name in ['owner', 'search_name',
+                self.assertTrue(fieldInfo.name in ['owner', 'search_name',
                                                 'meta_words', 'docid', 'title'])
 
                 if fieldInfo.getIndexOptions() != IndexOptions.NONE:
-                    self.assert_(fieldInfo.name in ['owner', 'meta_words',
+                    self.assertTrue(fieldInfo.name in ['owner', 'meta_words',
                                                     'docid', 'title'])
 
                 if (fieldInfo.getIndexOptions() != IndexOptions.NONE and
                     not fieldInfo.hasVectors()):
-                    self.assert_(fieldInfo.name in ['owner', 'meta_words',
+                    self.assertTrue(fieldInfo.name in ['owner', 'meta_words',
                                                     'docid', 'title'])
         finally:
             store = self.closeStore(store, reader)
@@ -314,7 +314,7 @@ class Test_PyLuceneWithFSStore(unittest.
         if os.path.exists(self.STORE_DIR):
             try:
                 shutil.rmtree(self.STORE_DIR)
-            except Exception, e:
+            except Exception as e:
                 # maybe leaking file handles in closing stores
                 # does not affect other tests
                 if e.__name__ == 'WindowsError':

Modified: lucene/pylucene/trunk/test3/test_PyLuceneThread.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test3/test_PyLuceneThread.py?rev=1787657&r1=1759171&r2=1787657&view=diff
==============================================================================
--- lucene/pylucene/trunk/test3/test_PyLuceneThread.py (original)
+++ lucene/pylucene/trunk/test3/test_PyLuceneThread.py Sun Mar 19 20:29:12 2017
@@ -65,7 +65,7 @@ class PyLuceneThreadTestCase(PyLuceneTes
         """ Run 5 threads with 2000 queries each """
 
         threads = []
-        for i in xrange(5):
+        for i in range(5):
             threads.append(threading.Thread(target=self.runSearch,
                                             args=(2000,)))
 

Modified: lucene/pylucene/trunk/test3/test_PythonDirectory.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test3/test_PythonDirectory.py?rev=1787657&r1=1759171&r2=1787657&view=diff
==============================================================================
--- lucene/pylucene/trunk/test3/test_PythonDirectory.py (original)
+++ lucene/pylucene/trunk/test3/test_PythonDirectory.py Sun Mar 19 20:29:12 2017
@@ -40,7 +40,7 @@ class DebugWrapper(object):
         self.obj = obj
 
     def __getattr__(self, name):
-        print self.obj.__class__.__name__, self.obj.name, name
+        print(self.obj.__class__.__name__, self.obj.name, name)
         sys.stdout.flush()
         return getattr(self.obj, name)
 
@@ -66,7 +66,7 @@ class PythonFileStreamInput(PythonIndexI
         self.isClone = clone
 
     def length(self):
-        return long(self._length)
+        return int(self._length)
 
     def clone(self):
         clone = PythonFileStreamInput(self.name, self.fh, self._length, True)
@@ -102,16 +102,16 @@ class PythonFileStreamOutput(PythonIndex
             self.fh.close()
 
     def getFilePointer(self):
-        return long(self._length)
+        return int(self._length)
 
     def getChecksum(self):
-        return long(self.crc & 0xffffffff)
+        return int(self.crc & 0xffffffff)
 
     def writeByte(self, b):
         if b < 0:
-            data = chr(b + 256)
+            data = bytes([b + 256])
         else:
-            data = chr(b)
+            data = bytes([b])
         self.fh.write(data)
         self._length += 1
 
@@ -121,7 +121,7 @@ class PythonFileStreamOutput(PythonIndex
             self.crc = crc32(data, self.crc)
 
     def writeBytes(self, bytes):
-        data = bytes.string_
+        data = bytes.bytes_
         self.fh.write(data)
         self.fh.flush()
         self._length += len(data)
@@ -197,7 +197,7 @@ class PythonFileDirectory(PythonDirector
 
     def fileLength(self, name):
         file_path = os.path.join(self.path, name)
-        return long(os.path.getsize(file_path))
+        return int(os.path.getsize(file_path))
 
     def listAll(self):
         return os.listdir(self.path)
@@ -216,7 +216,7 @@ class PythonFileDirectory(PythonDirector
         try:
             fh = open(file_path, "rb")
         except IOError:
-            raise JavaError, IOException(name)
+            raise JavaError(IOException(name))
         stream = PythonFileStreamInput(name, fh, os.path.getsize(file_path))
         self._streams.append(stream)
         return stream
@@ -255,9 +255,9 @@ class PythonDirectoryTests(unittest.Test
         store.close()
 
     def test_IncrementalLoop(self):
-        print "Testing Indexing Incremental Looping"
+        print("Testing Indexing Incremental Looping")
         for i in range(100):
-            print "indexing ", i
+            print("indexing ", i)
             sys.stdout.flush()
             self.test_indexDocument()
 
@@ -271,9 +271,9 @@ if __name__ == "__main__":
                 unittest.main()
             except:
                 pass
-            print 'inputs', env._dumpRefs(True).get('class org.osafoundation.lucene.store.PythonIndexOutput', 0)
-            print 'outputs', env._dumpRefs(True).get('class org.osafoundation.lucene.store.PythonIndexInput', 0)
-            print 'locks', env._dumpRefs(True).get('class org.osafoundation.lucene.store.PythonLock', 0)
-            print 'dirs', env._dumpRefs(True).get('class org.osafoundation.lucene.store.PythonLock', 0)
+            print('inputs', env._dumpRefs(True).get('class org.osafoundation.lucene.store.PythonIndexOutput', 0))
+            print('outputs', env._dumpRefs(True).get('class org.osafoundation.lucene.store.PythonIndexInput', 0))
+            print('locks', env._dumpRefs(True).get('class org.osafoundation.lucene.store.PythonLock', 0))
+            print('dirs', env._dumpRefs(True).get('class org.osafoundation.lucene.store.PythonLock', 0))
     else:
         unittest.main()
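
In PythonFileStreamOutput, writeByte() now builds a one-byte bytes object
instead of a chr() string, mapping Java's signed byte range onto 0..255 so it
can be written to a file opened in binary mode. The conversion in isolation
(the helper name is illustrative only):

    def java_byte_to_bytes(b):
        # Java bytes are signed (-128..127); Python's bytes() wants 0..255.
        return bytes([b + 256]) if b < 0 else bytes([b])

    assert java_byte_to_bytes(-1) == b'\xff'
    assert java_byte_to_bytes(65) == b'A'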

Modified: lucene/pylucene/trunk/test3/test_PythonQueryParser.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test3/test_PythonQueryParser.py?rev=1787657&r1=1759171&r2=1787657&view=diff
==============================================================================
--- lucene/pylucene/trunk/test3/test_PythonQueryParser.py (original)
+++ lucene/pylucene/trunk/test3/test_PythonQueryParser.py Sun Mar 19 20:29:12 2017
@@ -44,7 +44,7 @@ class PythonQueryParserTestCase(PyLucene
         qp = TestQueryParser('all', StandardAnalyzer())
 
         q = qp.parse("foo bar")
-        self.assertEquals(str(q), "all:foo all:bar all:extra_clause")
+        self.assertEqual(str(q), "all:foo all:bar all:extra_clause")
 
 
 class PythonMultiFieldQueryParserTestCase(PyLuceneTestCase):
@@ -59,7 +59,7 @@ class PythonMultiFieldQueryParserTestCas
         q = qp.parse("foo bar", ['one', 'two'],
                      [BooleanClause.Occur.SHOULD, BooleanClause.Occur.SHOULD],
                      StandardAnalyzer())
-        self.assertEquals(str(q), "(one:foo one:bar) (two:foo two:bar)")
+        self.assertEqual(str(q), "(one:foo one:bar) (two:foo two:bar)")
 
 
 if __name__ == "__main__":

Modified: lucene/pylucene/trunk/test3/test_RewriteQuery.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test3/test_RewriteQuery.py?rev=1787657&r1=1759171&r2=1787657&view=diff
==============================================================================
--- lucene/pylucene/trunk/test3/test_RewriteQuery.py (original)
+++ lucene/pylucene/trunk/test3/test_RewriteQuery.py Sun Mar 19 20:29:12 2017
@@ -39,7 +39,7 @@ class QueryRewriteTest(PyLuceneTestCase)
         base_query = TermQuery(self.term)
         new_query = base_query.rewrite(self.reader)
 
-        self.assertEquals(base_query, new_query)
+        self.assertEqual(base_query, new_query)
 
 
 if __name__ == "__main__":

Modified: lucene/pylucene/trunk/test3/test_StopAnalyzer.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test3/test_StopAnalyzer.py?rev=1787657&r1=1759171&r2=1787657&view=diff
==============================================================================
--- lucene/pylucene/trunk/test3/test_StopAnalyzer.py (original)
+++ lucene/pylucene/trunk/test3/test_StopAnalyzer.py Sun Mar 19 20:29:12 2017
@@ -33,16 +33,16 @@ class StopAnalyzerTestCase(unittest.Test
 
     def testDefaults(self):
 
-        self.assert_(self.stop is not None)
+        self.assertTrue(self.stop is not None)
         reader = StringReader("This is a test of the english stop analyzer")
         stream = self.stop.tokenStream("test", reader)
-        self.assert_(stream is not None)
+        self.assertTrue(stream is not None)
         stream.reset()
 
         termAtt = stream.getAttribute(CharTermAttribute.class_)
 
         while stream.incrementToken():
-            self.assert_(termAtt.toString() not in self.invalidTokens)
+            self.assertTrue(termAtt.toString() not in self.invalidTokens)
 
     def testStopList(self):
 
@@ -52,14 +52,14 @@ class StopAnalyzerTestCase(unittest.Test
         newStop = StopAnalyzer(stopWordsSet)
         reader = StringReader("This is a good test of the english stop analyzer")
         stream = newStop.tokenStream("test", reader)
-        self.assert_(stream is not None)
+        self.assertTrue(stream is not None)
         stream.reset()
 
         termAtt = stream.getAttribute(CharTermAttribute.class_)
 
         while stream.incrementToken():
             text = termAtt.toString()
-            self.assert_(text not in stopWordsSet)
+            self.assertTrue(text not in stopWordsSet)
 
     def testStopListPositions(self):
 
@@ -70,7 +70,7 @@ class StopAnalyzerTestCase(unittest.Test
         reader = StringReader("This is a good test of the english stop analyzer with positions")
         expectedIncr = [ 1,   1, 1,          3, 1,  1,      1,            2,   1]
         stream = newStop.tokenStream("test", reader)
-        self.assert_(stream is not None)
+        self.assertTrue(stream is not None)
         stream.reset()
 
         i = 0
@@ -79,7 +79,7 @@ class StopAnalyzerTestCase(unittest.Test
 
         while stream.incrementToken():
             text = termAtt.toString()
-            self.assert_(text not in stopWordsSet)
+            self.assertTrue(text not in stopWordsSet)
             self.assertEqual(expectedIncr[i],
                              posIncrAtt.getPositionIncrement())
             i += 1

Modified: lucene/pylucene/trunk/test3/test_StopWords.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test3/test_StopWords.py?rev=1787657&r1=1759171&r2=1787657&view=diff
==============================================================================
--- lucene/pylucene/trunk/test3/test_StopWords.py (original)
+++ lucene/pylucene/trunk/test3/test_StopWords.py Sun Mar 19 20:29:12 2017
@@ -36,7 +36,7 @@ class StopWordsTestCase(unittest.TestCas
             result = StandardTokenizer()
             result.setReader(self.reader)
             result = StopFilter(result, self.stop_set)
-        except Exception, e:
+        except Exception as e:
             self.fail(str(e))
 
 

Modified: lucene/pylucene/trunk/test3/test_ThaiAnalyzer.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test3/test_ThaiAnalyzer.py?rev=1787657&r1=1759171&r2=1787657&view=diff
==============================================================================
--- lucene/pylucene/trunk/test3/test_ThaiAnalyzer.py (original)
+++ lucene/pylucene/trunk/test3/test_ThaiAnalyzer.py Sun Mar 19 20:29:12 2017
@@ -25,17 +25,17 @@ class ThaiAnalyzerTestCase(BaseTokenStre
 
     def testOffsets(self):
         self._assertAnalyzesTo(ThaiAnalyzer(CharArraySet.EMPTY_SET),
-                               u"การที่ได้ต้องแสดงว่างานดี",
-                               [ u"การ", u"ที่", u"ได้", u"ต้อง", u"แสดง",
-                                 u"ว่า", u"งาน", u"ดี" ],
+                               "การที่ได้ต้องแสดงว่างานดี",
+                               [ "การ", "ที่", "ได้", "ต้อง", "แสดง",
+                                 "ว่า", "งาน", "ดี" ],
                                [ 0, 3, 6, 9, 13, 17, 20, 23 ],
                                [ 3, 6, 9, 13, 17, 20, 23, 25 ])
 
 
     def testStopWords(self):
         analyzer = ThaiAnalyzer()
-        self._assertAnalyzesTo(analyzer, u"การที่ได้ต้องแสดงว่างานดี",
-                               [ u"แสดง", u"งาน", u"ดี" ],
+        self._assertAnalyzesTo(analyzer, "การที่ได้ต้องแสดงว่างานดี",
+                               [ "แสดง", "งาน", "ดี" ],
                                [ 13, 20, 23 ],
                                [ 17, 23, 25 ],
                                [ 5, 2, 1 ])
@@ -44,16 +44,16 @@ class ThaiAnalyzerTestCase(BaseTokenStre
     def testPositionIncrements(self):
         analyzer = ThaiAnalyzer(StopAnalyzer.ENGLISH_STOP_WORDS_SET)
         self._assertAnalyzesTo(
-            analyzer, u"การที่ได้ต้อง the แสดงว่างานดี",
-            [ u"การ", u"ที่", u"ได้", u"ต้อง", u"แสดง", u"ว่า", u"งาน", u"ดี" ],
+            analyzer, "การที่ได้ต้อง the แสดงว่างานดี",
+            [ "การ", "ที่", "ได้", "ต้อง", "แสดง", "ว่า", "งาน", "ดี" ],
             [ 0, 3, 6, 9, 18, 22, 25, 28 ],
             [ 3, 6, 9, 13, 22, 25, 28, 30 ],
             [ 1, 1, 1, 1, 2, 1, 1, 1 ])
 
         # case that a stopword is adjacent to thai text, with no whitespace
         self._assertAnalyzesTo(
-            analyzer, u"การที่ได้ต้องthe แสดงว่างานดี",
-            [ u"การ", u"ที่", u"ได้", u"ต้อง", u"แสดง", u"ว่า", u"งาน", u"ดี" ],
+            analyzer, "การที่ได้ต้องthe แสดงว่างานดี",
+            [ "การ", "ที่", "ได้", "ต้อง", "แสดง", "ว่า", "งาน", "ดี" ],
             [ 0, 3, 6, 9, 17, 21, 24, 27 ],
             [ 3, 6, 9, 13, 21, 24, 27, 29 ],
             [ 1, 1, 1, 1, 2, 1, 1, 1 ])

Modified: lucene/pylucene/trunk/test3/test_bug1564.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test3/test_bug1564.py?rev=1787657&r1=1759171&r2=1787657&view=diff
==============================================================================
--- lucene/pylucene/trunk/test3/test_bug1564.py (original)
+++ lucene/pylucene/trunk/test3/test_bug1564.py Sun Mar 19 20:29:12 2017
@@ -29,7 +29,7 @@ class Test_Bug1564(PyLuceneTestCase):
         writer = self.getWriter(analyzer=self.analyzer)
 
         doc = Document()
-        doc.add(Field('all', u'windowpane beplaster rapacious \
+        doc.add(Field('all', 'windowpane beplaster rapacious \
         catatonia gauntlet wynn depressible swede pick dressmake supreme \
         jeremy plumb theoretic bureaucracy causation chartres equipoise \
         dispersible careen heard', TextField.TYPE_NOT_STORED))

Modified: lucene/pylucene/trunk/test3/test_bug1842.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test3/test_bug1842.py?rev=1787657&r1=1759171&r2=1787657&view=diff
==============================================================================
--- lucene/pylucene/trunk/test3/test_bug1842.py (original)
+++ lucene/pylucene/trunk/test3/test_bug1842.py Sun Mar 19 20:29:12 2017
@@ -59,8 +59,8 @@ class Test_Bug1842(PyLuceneTestCase):
             terms.append(term.utf8ToString())
             freqs.append(termsEnum.totalTermFreq())
         terms.sort()
-        self.assert_(terms == ['blah', 'gesundheit'])
-        self.assert_(freqs == [3, 1])
+        self.assertTrue(terms == ['blah', 'gesundheit'])
+        self.assertTrue(freqs == [3, 1])
 
 if __name__ == '__main__':
     lucene.initVM(vmargs=['-Djava.awt.headless=true'])


