📅  Last modified: 2022-03-11 14:46:40.232000             🧑  Author: Mango
from nltk.corpus import stopwords

# NOTE(review): TfidfVectorizer and tokenize_and_stem are not defined in this
# chunk — presumably `from sklearn.feature_extraction.text import TfidfVectorizer`
# and a project-defined stemming tokenizer exist elsewhere; confirm before running.

# Filter stop words from both languages present in the corpus.
final_stopwords_list = stopwords.words('english') + stopwords.words('french')

# TF-IDF vectorizer over unigrams through trigrams.
# FIX: the original passed `ngram_range(1,3)` — a bare call expression in the
# argument list, which is a SyntaxError. It must be the keyword argument
# `ngram_range=(1, 3)`.
tfidf_vectorizer = TfidfVectorizer(
    max_df=0.8,           # drop terms appearing in more than 80% of documents
    max_features=200000,  # cap vocabulary size
    min_df=0.2,           # drop terms appearing in fewer than 20% of documents
    stop_words=final_stopwords_list,
    use_idf=True,
    tokenizer=tokenize_and_stem,  # project-defined tokenizer — must be in scope
    ngram_range=(1, 3),
)