nlparrot

natural language processing server

commit 85308ffb9cacec3324f1572106ea5e352828bafd
parent 3158f9f15349ff7cfa0838714511f8c6c930b81f
Author: Stefan Koch <programming@stefan-koch.name>
Date:   Sat,  5 Aug 2023 14:01:49 +0200

apply isort import sorting
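
isort rewrites each module's import block into a fixed layout: standard-library modules first, third-party packages second, and local (relative) imports last, with one blank line between groups, plain `import` statements placed ahead of `from ... import` statements inside a group (visible in the server.py hunk below), and parenthesised import lists collapsed onto a single line where they fit (visible in the remaining hunks). The sketch below only illustrates that layout using modules from this repository; the isort invocation mentioned in the comments is an assumption, not something recorded in this commit.

    # A minimal sketch of the import layout isort enforces.  The command that
    # produced this commit is not recorded; an assumed invocation would be
    # something like `isort src tests`.

    # Standard-library imports come first.
    import collections.abc

    # Third-party packages follow after a blank line, sorted within the group.
    from sudachipy import dictionary, tokenizer

    # Local (relative) imports come last; a multi-line parenthesised import is
    # collapsed onto one line when it fits within the configured line length.
    from .generic import Token, Tokenizer

Because the change is purely mechanical, only import blocks are touched and runtime behaviour is unchanged.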

Diffstat:
M src/nlparrot/server.py                | 4 ++--
M src/nlparrot/tokenization/croatian.py | 6 ++----
M src/nlparrot/tokenization/factory.py  | 5 +----
M src/nlparrot/tokenization/japanese.py | 9 +++------
M tests/tokenization/test_croatian.py   | 4 +---
M tests/tokenization/test_japanese.py   | 5 +----
6 files changed, 10 insertions(+), 23 deletions(-)

diff --git a/src/nlparrot/server.py b/src/nlparrot/server.py
@@ -1,9 +1,9 @@
-from multiprocessing.connection import Listener
 import os
+from multiprocessing.connection import Listener
 
 from nlparrot.detection import detect_language
-from nlparrot.tokenization.factory import get_tokenizers
 from nlparrot.readability import flesch_reading_ease
+from nlparrot.tokenization.factory import get_tokenizers
 
 
 def get_listen_address():
diff --git a/src/nlparrot/tokenization/croatian.py b/src/nlparrot/tokenization/croatian.py
@@ -1,10 +1,8 @@
 import collections.abc
+
 import stanza
 
-from .generic import (
-    Token,
-    Tokenizer,
-)
+from .generic import Token, Tokenizer
 
 
 class CroatianVocabularyTokenizer(Tokenizer):
diff --git a/src/nlparrot/tokenization/factory.py b/src/nlparrot/tokenization/factory.py
@@ -1,8 +1,5 @@
 from .croatian import CroatianVocabularyTokenizer
-from .japanese import (
-    JapaneseKanjiTokenizer,
-    JapaneseWordTokenizer,
-)
+from .japanese import JapaneseKanjiTokenizer, JapaneseWordTokenizer
 
 
 def get_tokenizers(language_code: str):
diff --git a/src/nlparrot/tokenization/japanese.py b/src/nlparrot/tokenization/japanese.py
@@ -1,11 +1,8 @@
 import collections.abc
 
-from sudachipy import tokenizer
-from sudachipy import dictionary
-from .generic import (
-    Token,
-    Tokenizer,
-)
+from sudachipy import dictionary, tokenizer
+
+from .generic import Token, Tokenizer
 
 
 class JapaneseKanjiTokenizer(Tokenizer):
diff --git a/tests/tokenization/test_croatian.py b/tests/tokenization/test_croatian.py
@@ -1,7 +1,5 @@
+from nlparrot.tokenization.croatian import CroatianVocabularyTokenizer
 from nlparrot.tokenization.generic import Token
-from nlparrot.tokenization.croatian import (
-    CroatianVocabularyTokenizer,
-)
 
 
 def test_croatian_vocabulary_tokenizer_keeps_whitespace():
diff --git a/tests/tokenization/test_japanese.py b/tests/tokenization/test_japanese.py
@@ -1,8 +1,5 @@
 from nlparrot.tokenization.generic import Token
-from nlparrot.tokenization.japanese import (
-    JapaneseKanjiTokenizer,
-    JapaneseWordTokenizer,
-)
+from nlparrot.tokenization.japanese import JapaneseKanjiTokenizer, JapaneseWordTokenizer
 
 
 def test_japanese_kanji_tokenizer():