diff --git a/nimare/annotate/gclda.py b/nimare/annotate/gclda.py
index 7e8a70626..940c2459b 100755
--- a/nimare/annotate/gclda.py
+++ b/nimare/annotate/gclda.py
@@ -179,7 +179,7 @@ def __init__(
 
         # Create docidx column
         count_df["docidx"] = count_df["id"].map(docidx_mapper)
-        count_df = count_df.drop("id", 1)
+        count_df = count_df.drop(columns=["id"])
 
         # Remove words not found anywhere in the corpus
         n_terms = len(count_df.columns) - 1  # number of columns minus one for docidx
diff --git a/nimare/annotate/text.py b/nimare/annotate/text.py
index 5f9f71feb..5aaac04f2 100755
--- a/nimare/annotate/text.py
+++ b/nimare/annotate/text.py
@@ -59,7 +59,15 @@ def generate_counts(text_df, text_column="abstract", tfidf=True, min_df=50, max_
         stop_words=stop_words,
     )
     weights = vectorizer.fit_transform(text).toarray()
-    names = vectorizer.get_feature_names()
+
+    if hasattr(vectorizer, "get_feature_names_out"):
+        # scikit-learn >= 1.0.0
+        names = vectorizer.get_feature_names_out()
+    else:
+        # scikit-learn < 1.0.0
+        # To remove when we drop support for 3.6 and increase minimum sklearn version to 1.0.0.
+        names = vectorizer.get_feature_names()
+
+    names = [str(name) for name in names]
     weights_df = pd.DataFrame(weights, columns=names, index=ids)
     weights_df.index.name = "id"
diff --git a/nimare/tests/utils.py b/nimare/tests/utils.py
index 1e6c5c97d..ecc74cf4a 100644
--- a/nimare/tests/utils.py
+++ b/nimare/tests/utils.py
@@ -87,7 +87,7 @@ def _check_p_values(
             for j in range(3)
         ]
 
-        best_chance_p_values = p_map[gtf_idx]
+        best_chance_p_values = p_map[tuple(gtf_idx)]
         assert all(best_chance_p_values < ALPHA) == good_sensitivity
 
     p_array_sig = p_array[sig_idx]
diff --git a/pyproject.toml b/pyproject.toml
index 6d09d30ce..456cb6d10 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -26,7 +26,9 @@ exclude = '''
 
 [tool.pytest.ini_options]
 markers = [
-    "performance: mark tests that measure performance (deselect with '-m \"not performance\"')",
+    "performance_smoke: mark smoke tests that measure performance",
+    "performance_estimators: mark tests that measure estimator performance",
+    "performance_correctors: mark tests that measure corrector performance",
]
 
 [tool.isort]
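
Note (not part of the patch): the nimare/annotate/text.py change above uses the usual hasattr shim to bridge scikit-learn's rename of get_feature_names to get_feature_names_out in version 1.0. A minimal standalone sketch of that pattern follows; the TfidfVectorizer usage mirrors the diff, but the sample documents are made up for illustration.

from sklearn.feature_extraction.text import TfidfVectorizer

# Illustrative documents only; any fitted vectorizer behaves the same way.
docs = ["functional mri study of memory", "resting state fmri connectivity"]
vectorizer = TfidfVectorizer()
weights = vectorizer.fit_transform(docs).toarray()

if hasattr(vectorizer, "get_feature_names_out"):
    # scikit-learn >= 1.0.0: new accessor, returns an array of feature names
    names = vectorizer.get_feature_names_out()
else:
    # scikit-learn < 1.0.0: old accessor, removed in later releases
    names = vectorizer.get_feature_names()

# Cast to plain str so the names are safe to use as DataFrame column labels
names = [str(name) for name in names]
print(names)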