diff --git a/pandas/_typing.py b/pandas/_typing.py
index 2799dac51370e..c9af531fd90b7 100644
--- a/pandas/_typing.py
+++ b/pandas/_typing.py
@@ -83,8 +83,7 @@
     # numpy compatible types
     NumpyValueArrayLike: TypeAlias = ScalarLike_co | npt.ArrayLike
-    # Name "npt._ArrayLikeInt_co" is not defined [name-defined]
-    NumpySorter: TypeAlias = npt._ArrayLikeInt_co | None  # type: ignore[name-defined]
+    NumpySorter: TypeAlias = npt._ArrayLikeInt_co | None

     P = ParamSpec("P")
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index d8953da5490cd..bbca78459ca75 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -215,14 +215,15 @@ def _reconstruct_data(
         # that values.dtype == dtype
         cls = dtype.construct_array_type()
-        # error: Incompatible types in assignment (expression has type
-        # "ExtensionArray", variable has type "ndarray[Any, Any]")
-        values = cls._from_sequence(values, dtype=dtype)  # type: ignore[assignment]
-
-    else:
-        values = values.astype(dtype, copy=False)
-
-    return values
+        # error: Incompatible return value type
+        # (got "ExtensionArray",
+        # expected "ndarray[tuple[Any, ...], dtype[Any]]")
+        return cls._from_sequence(values, dtype=dtype)  # type: ignore[return-value]
+
+    # error: Incompatible return value type
+    # (got "ndarray[tuple[Any, ...], dtype[Any]]",
+    # expected "ExtensionArray")
+    return values.astype(dtype, copy=False)  # type: ignore[return-value]


 def _ensure_arraylike(values, func_name: str) -> ArrayLike:
diff --git a/pandas/core/array_algos/quantile.py b/pandas/core/array_algos/quantile.py
index e15a986950cce..8a920d1849bb3 100644
--- a/pandas/core/array_algos/quantile.py
+++ b/pandas/core/array_algos/quantile.py
@@ -102,7 +102,7 @@ def quantile_with_mask(
             interpolation=interpolation,
         )

-        result = np.asarray(result)  # type: ignore[assignment]
+        result = np.asarray(result)
         result = result.T

     return result
diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py
index 0c2ef797ff044..84aca81420fe1 100644
--- a/pandas/core/arrays/_mixins.py
+++ b/pandas/core/arrays/_mixins.py
@@ -151,7 +151,9 @@ def view(self, dtype: Dtype | None = None) -> ArrayLike:
             td64_values = arr.view(dtype)
             return TimedeltaArray._simple_new(td64_values, dtype=dtype)

-        return arr.view(dtype=dtype)
+        # error: Argument "dtype" to "view" of "ndarray" has incompatible type
+        # "ExtensionDtype | dtype[Any]"; expected "dtype[Any] | _HasDType[dtype[Any]]"
+        return arr.view(dtype=dtype)  # type: ignore[arg-type]

     def take(
         self,
diff --git a/pandas/core/arrays/arrow/_arrow_utils.py b/pandas/core/arrays/arrow/_arrow_utils.py
index 7da83e2257e30..285c3fd465ffc 100644
--- a/pandas/core/arrays/arrow/_arrow_utils.py
+++ b/pandas/core/arrays/arrow/_arrow_utils.py
@@ -44,7 +44,7 @@ def pyarrow_array_to_numpy_and_mask(
         mask = pyarrow.BooleanArray.from_buffers(
             pyarrow.bool_(), len(arr), [None, bitmask], offset=arr.offset
         )
-        mask = np.asarray(mask)  # type: ignore[assignment]
+        mask = np.asarray(mask)
     else:
         mask = np.ones(len(arr), dtype=bool)
     return data, mask
diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py
index db6b58c8dbc7f..0f7cd792f7ee8 100644
--- a/pandas/core/arrays/arrow/array.py
+++ b/pandas/core/arrays/arrow/array.py
@@ -657,7 +657,7 @@ def _box_pa_array(
         ):
             arr_value = np.asarray(value, dtype=object)
             # similar to isna(value) but exclude NaN, NaT, nat-like, nan-like
-            mask = is_pdna_or_none(arr_value)  # type: ignore[assignment]
+            mask = is_pdna_or_none(arr_value)

         try:
             pa_array = pa.array(value, type=pa_type, mask=mask)
@@ -2738,7 +2738,7 @@ def _str_get_dummies(self, sep: str = "|", dtype: NpDtype | None = None):
             dummies_dtype = np.bool_
         dummies = np.zeros(n_rows * n_cols, dtype=dummies_dtype)
         dummies[indices] = True
-        dummies = dummies.reshape((n_rows, n_cols))  # type: ignore[assignment]
+        dummies = dummies.reshape((n_rows, n_cols))
         result = self._from_pyarrow_array(pa.array(list(dummies)))
         return result, uniques_sorted.to_pylist()

diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 4b5d2acf008a8..6b998fbcfc1a0 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -1869,7 +1869,7 @@ def value_counts(self, dropna: bool = True) -> Series:
             count = np.bincount(obs, minlength=ncat or 0)
         else:
             count = np.bincount(np.where(mask, code, ncat))
-            ix = np.append(ix, -1)  # type: ignore[assignment]
+            ix = np.append(ix, -1)

         ix = coerce_indexer_dtype(ix, self.dtype.categories)
         ix_categorical = self._from_backing_data(ix)
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 38be038efcaa5..7e0b91365ce02 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -804,7 +804,11 @@ def _add_offset(self, offset: BaseOffset) -> Self:
         try:
             res_values = offset._apply_array(values._ndarray)
             if res_values.dtype.kind == "i":
-                res_values = res_values.view(values.dtype)
+                # error: Argument 1 to "view" of "ndarray" has
+                # incompatible type
+                # "dtype[datetime64[date | int | None]] | DatetimeTZDtype";
+                # expected "dtype[Any] | _HasDType[dtype[Any]]" [arg-type]
+                res_values = res_values.view(values.dtype)  # type: ignore[arg-type]
         except NotImplementedError:
             if get_option("performance_warnings"):
                 warnings.warn(
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py
index 4801c21aa325a..b38eaa4072796 100644
--- a/pandas/core/arrays/string_.py
+++ b/pandas/core/arrays/string_.py
@@ -764,7 +764,7 @@ def _cast_pointwise_result(self, values) -> ArrayLike:
         result = super()._cast_pointwise_result(values)
         if isinstance(result.dtype, StringDtype):
             # Ensure we retain our same na_value/storage
-            result = result.astype(self.dtype)  # type: ignore[call-overload]
+            result = result.astype(self.dtype)
         return result

     @classmethod
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 1fc2d5535ad9e..b1181a7541e7a 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -1886,7 +1886,7 @@ def _apply_filter(self, indices, dropna):
             mask.fill(False)
             mask[indices.astype(int)] = True
             # mask fails to broadcast when passed to where; broadcast manually.
-            mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T  # type: ignore[assignment]
+            mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
             filtered = self._selected_obj.where(mask)  # Fill with NaNs.
         return filtered

diff --git a/pandas/core/indexers/objects.py b/pandas/core/indexers/objects.py
index 6fc638e85bc5e..88379164534f2 100644
--- a/pandas/core/indexers/objects.py
+++ b/pandas/core/indexers/objects.py
@@ -131,8 +131,8 @@ def get_window_bounds(
         if closed in ["left", "neither"]:
             end -= 1

-        end = np.clip(end, 0, num_values)  # type: ignore[assignment]
-        start = np.clip(start, 0, num_values)  # type: ignore[assignment]
+        end = np.clip(end, 0, num_values)
+        start = np.clip(start, 0, num_values)

         return start, end

@@ -402,7 +402,7 @@ def get_window_bounds(
         start = np.arange(0, num_values, step, dtype="int64")
         end = start + self.window_size
         if self.window_size:
-            end = np.clip(end, 0, num_values)  # type: ignore[assignment]
+            end = np.clip(end, 0, num_values)

         return start, end

@@ -488,7 +488,7 @@ def get_window_bounds(
             )
             window_indices_start += len(indices)
             # Extend as we'll be slicing window like [start, end)
-            window_indices = np.append(window_indices, [window_indices[-1] + 1]).astype(  # type: ignore[assignment]
+            window_indices = np.append(window_indices, [window_indices[-1] + 1]).astype(
                 np.int64, copy=False
             )
             start_arrays.append(window_indices.take(ensure_platform_int(start)))
diff --git a/pandas/core/methods/describe.py b/pandas/core/methods/describe.py
index 944e28a9b0238..bbeaa98178b68 100644
--- a/pandas/core/methods/describe.py
+++ b/pandas/core/methods/describe.py
@@ -353,14 +353,11 @@ def _refine_percentiles(
     if percentiles is None:
         return np.array([0.25, 0.5, 0.75])

-    # explicit conversion of `percentiles` to list
-    percentiles = list(percentiles)
+    percentiles = np.asarray(percentiles)

     # get them all to be in [0, 1]
     validate_percentile(percentiles)

-    percentiles = np.asarray(percentiles)
-
     # sort and check for duplicates
     unique_pcts = np.unique(percentiles)
     assert percentiles is not None
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index 7bcf4371a0bcd..2c3b70f7efd2e 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -651,9 +651,7 @@ def _mask_datetimelike_result(
         # we need to apply the mask
         result = result.astype("i8").view(orig_values.dtype)
         axis_mask = mask.any(axis=axis)
-        # error: Unsupported target for indexed assignment ("Union[ndarray[Any, Any],
-        # datetime64, timedelta64]")
-        result[axis_mask] = iNaT  # type: ignore[index]
+        result[axis_mask] = iNaT
     else:
         if mask.any():
             return np.int64(iNaT).view(orig_values.dtype)
diff --git a/pandas/core/reshape/encoding.py b/pandas/core/reshape/encoding.py
index 67fb075110f0d..775dd9ca38ca3 100644
--- a/pandas/core/reshape/encoding.py
+++ b/pandas/core/reshape/encoding.py
@@ -359,7 +359,7 @@ def get_empty_frame(data) -> DataFrame:

     if drop_first:
         # remove first GH12042
-        dummy_mat = dummy_mat[:, 1:]  # type: ignore[assignment]
+        dummy_mat = dummy_mat[:, 1:]
         dummy_cols = dummy_cols[1:]

     return DataFrame(dummy_mat, index=index, columns=dummy_cols, dtype=_dtype)
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index f35b0ef197288..f40a4b5d60ecd 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -1746,7 +1746,9 @@ def _maybe_coerce_merge_keys(self) -> None:
                     mask = ~np.isnan(lk)
                     match = lk == casted
-                    if not match[mask].all():
+                    # error: Item "ExtensionArray" of
+                    # "ExtensionArray | Any" has no attribute "all"
+                    if not match[mask].all():  # type: ignore[union-attr]
                         warnings.warn(
                             "You are merging on int and float "
                             "columns where the float values "
@@ -1766,7 +1768,9 @@ def _maybe_coerce_merge_keys(self) -> None:
                     mask = ~np.isnan(rk)
                     match = rk == casted
-                    if not match[mask].all():
+                    # error: Item "ExtensionArray" of
+                    # "ExtensionArray | Any" has no attribute "all"
+                    if not match[mask].all():  # type: ignore[union-attr]
                         warnings.warn(
                             "You are merging on int and float "
                             "columns where the float values "
diff --git a/pandas/core/util/hashing.py b/pandas/core/util/hashing.py
index 895706f501c15..3c4ad1d50f85e 100644
--- a/pandas/core/util/hashing.py
+++ b/pandas/core/util/hashing.py
@@ -324,8 +324,8 @@ def _hash_ndarray(
         )

         codes, categories = factorize(vals, sort=False)
-        dtype = CategoricalDtype(categories=Index(categories), ordered=False)
-        cat = Categorical._simple_new(codes, dtype)
+        tdtype = CategoricalDtype(categories=Index(categories), ordered=False)
+        cat = Categorical._simple_new(codes, tdtype)
         return cat._hash_pandas_object(
             encoding=encoding, hash_key=hash_key, categorize=False
         )
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 088cae8e20524..259caf984a84c 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -3301,18 +3301,16 @@ def write_array(
             # store as UTC
             # with a zone

-            # error: Item "ExtensionArray" of "Union[Any, ExtensionArray]" has no
-            # attribute "asi8"
+            # error: "ExtensionArray" has no attribute "asi8"
             self._handle.create_array(
                 self.group,
                 key,
-                value.asi8,  # type: ignore[union-attr]
+                value.asi8,  # type: ignore[attr-defined]
             )

             node = getattr(self.group, key)
-            # error: Item "ExtensionArray" of "Union[Any, ExtensionArray]" has no
-            # attribute "tz"
-            node._v_attrs.tz = _get_tz(value.tz)  # type: ignore[union-attr]
+            # error: "ExtensionArray" has no attribute "tz"
+            node._v_attrs.tz = _get_tz(value.tz)  # type: ignore[attr-defined]
             node._v_attrs.value_type = f"datetime64[{value.dtype.unit}]"
         elif lib.is_np_dtype(value.dtype, "m"):
             self._handle.create_array(self.group, key, value.view("i8"))
@@ -5195,8 +5193,7 @@ def _maybe_convert_for_string_atom(
     columns: list[str],
 ):
     if isinstance(bvalues.dtype, StringDtype):
-        # "ndarray[Any, Any]" has no attribute "to_numpy"
-        bvalues = bvalues.to_numpy()  # type: ignore[union-attr]
+        bvalues = bvalues.to_numpy()

     if bvalues.dtype != object:
         return bvalues
