From 050165f5f7b06f70584cbaa9c25e6f8a17b71226 Mon Sep 17 00:00:00 2001
From: monster29000 <1067752807@qq.com>
Date: Sat, 17 Aug 2024 08:28:56 +0800
Subject: [PATCH] Use np.asarray instead of np.array to avoid unnecessary
 copies when the input is already an ndarray

---
 lib/align/aligned_face.py                     |  4 +-
 lib/align/aligned_mask.py                     | 10 ++--
 lib/align/constants.py                        |  6 +--
 lib/align/detected_face.py                    |  8 ++--
 lib/align/pose.py                             | 16 +++----
 lib/align/updater.py                          |  2 +-
 lib/convert.py                                |  8 ++--
 lib/gui/analysis/event_reader.py              |  4 +-
 lib/gui/analysis/stats.py                     | 10 ++--
 lib/gui/custom_widgets.py                     |  4 +-
 lib/gui/utils/image.py                        |  6 +--
 lib/image.py                                  |  2 +-
 lib/keras_utils.py                            |  4 +-
 lib/model/autoclip.py                         |  2 +-
 lib/model/initializers.py                     |  2 +-
 lib/model/losses/feature_loss.py              |  4 +-
 lib/model/losses/perceptual_loss.py           |  4 +-
 lib/training/augmentation.py                  | 14 +++---
 lib/training/generator.py                     | 16 +++----
 plugins/convert/color/manual_balance.py       |  2 +-
 plugins/convert/mask/mask_blend.py            |  2 +-
 plugins/convert/writer/patch.py               | 10 ++--
 plugins/extract/_base.py                      |  4 +-
 plugins/extract/align/_base/aligner.py        | 20 ++++----
 plugins/extract/align/_base/processing.py     | 10 ++--
 plugins/extract/align/cv2_dnn.py              |  4 +-
 plugins/extract/align/external.py             |  8 ++--
 plugins/extract/align/fan.py                  |  4 +-
 plugins/extract/detect/_base.py               | 16 +++----
 plugins/extract/detect/cv2_dnn.py             |  2 +-
 plugins/extract/detect/external.py            | 10 ++--
 plugins/extract/detect/mtcnn.py               | 18 ++++----
 plugins/extract/detect/s3fd.py                | 10 ++--
 plugins/extract/mask/bisenet_fp.py            |  2 +-
 plugins/extract/mask/components.py            |  2 +-
 plugins/extract/mask/extended.py              |  6 +--
 plugins/extract/mask/unet_dfl.py              |  2 +-
 plugins/extract/mask/vgg_clear.py             |  2 +-
 plugins/extract/recognition/_base.py          |  6 +--
 plugins/extract/recognition/vgg_face2.py      |  4 +-
 plugins/train/model/_base/model.py            |  4 +-
 plugins/train/model/dfl_sae.py                |  2 +-
 plugins/train/trainer/_base.py                |  6 +--
 scripts/convert.py                            |  4 +-
 scripts/extract.py                            | 18 ++++----
 scripts/fsmedia.py                            |  2 +-
 tests/lib/gui/stats/event_reader_test.py      | 46 +++++++++----------
 tests/tools/alignments/media_test.py          |  6 +--
 tests/utils.py                                |  2 +-
 tools/alignments/jobs.py                      |  2 +-
 tools/alignments/jobs_faces.py                |  2 +-
 tools/alignments/jobs_frames.py               |  6 +--
 tools/manual/detected_faces.py                |  2 +-
 tools/manual/faceviewer/frame.py              |  8 ++--
 tools/manual/faceviewer/interact.py           |  6 +--
 tools/manual/faceviewer/viewport.py           | 10 ++--
 tools/manual/frameviewer/editor/_base.py      |  2 +-
 .../manual/frameviewer/editor/bounding_box.py |  6 +--
 .../manual/frameviewer/editor/extract_box.py  | 28 +++++------
 tools/manual/frameviewer/editor/landmarks.py  | 12 ++---
 tools/manual/frameviewer/editor/mask.py       | 24 +++++-----
 tools/preview/viewer.py                       |  4 +-
 tools/sort/sort_methods.py                    | 16 +++----
 tools/sort/sort_methods_aligned.py            |  2 +-
 64 files changed, 245 insertions(+), 245 deletions(-)

diff --git a/lib/align/aligned_face.py b/lib/align/aligned_face.py
index 41f2eed8c3..ebf2b78982 100644
--- a/lib/align/aligned_face.py
+++ b/lib/align/aligned_face.py
@@ -369,7 +369,7 @@ def original_roi(self) -> np.ndarray:
         frame.
         
""" with self._cache.lock("original_roi"): if self._cache.original_roi is None: - roi = np.array([[0, 0], + roi = np.asarray([[0, 0], [0, self._size - 1], [self._size - 1, self._size - 1], [self._size - 1, 0]]) @@ -650,7 +650,7 @@ def get_cropped_roi(self, self.pose.offset[centering], self._source_centering) padding = target_size // 2 - roi = np.array([center - padding, center + padding]).ravel() + roi = np.asarray([center - padding, center + padding]).ravel() logger.trace( # type:ignore[attr-defined] "centering: '%s', center: %s, padding: %s, sub roi: %s", centering, center, padding, roi) diff --git a/lib/align/aligned_mask.py b/lib/align/aligned_mask.py index 6a34060653..c861920ce6 100644 --- a/lib/align/aligned_mask.py +++ b/lib/align/aligned_mask.py @@ -105,7 +105,7 @@ def stored_mask(self) -> np.ndarray: def original_roi(self) -> np.ndarray: """ :class: `numpy.ndarray`: The original region of interest of the mask in the source frame. """ - points = np.array([[0, 0], + points = np.asarray([[0, 0], [0, self.stored_size - 1], [self.stored_size - 1, self.stored_size - 1], [self.stored_size - 1, 0]], np.int32).reshape((-1, 1, 2)) @@ -287,7 +287,7 @@ def set_sub_crop(self, centering, self.stored_size, coverage_ratio=coverage_ratio) - roi = np.array([center - crop_size // 2, center + crop_size // 2]).ravel() + roi = np.asarray([center - crop_size // 2, center + crop_size // 2]).ravel() self._sub_crop_size = crop_size self._sub_crop_slices["in"] = [slice(max(roi[1], 0), max(roi[3], 0)), @@ -318,8 +318,8 @@ def _adjust_affine_matrix(self, mask_size: int, affine_matrix: np.ndarray) -> np The affine matrix adjusted for the mask at its stored dimensions. """ zoom = self.stored_size / mask_size - zoom_mat = np.array([[zoom, 0, 0.], [0, zoom, 0.]]) - adjust_mat = np.dot(zoom_mat, np.concatenate((affine_matrix, np.array([[0., 0., 1.]])))) + zoom_mat = np.asarray([[zoom, 0, 0.], [0, zoom, 0.]]) + adjust_mat = np.dot(zoom_mat, np.concatenate((affine_matrix, np.asarray([[0., 0., 1.]])))) logger.trace("storage_size: %s, mask_size: %s, zoom: %s, " # type:ignore[attr-defined] "original matrix: %s, adjusted_matrix: %s", self.stored_size, mask_size, zoom, affine_matrix.shape, adjust_mat.shape) @@ -374,7 +374,7 @@ def from_dict(self, mask_dict: MaskAlignmentsFileDict) -> None: self._mask = mask_dict["mask"] affine_matrix = mask_dict["affine_matrix"] self._affine_matrix = (affine_matrix if isinstance(affine_matrix, np.ndarray) - else np.array(affine_matrix, dtype="float64")) + else np.asarray(affine_matrix, dtype="float64")) self._interpolator = mask_dict["interpolator"] self.stored_size = mask_dict["stored_size"] centering = mask_dict.get("stored_centering") diff --git a/lib/align/constants.py b/lib/align/constants.py index 6d5b484e59..7ea8629804 100644 --- a/lib/align/constants.py +++ b/lib/align/constants.py @@ -50,9 +50,9 @@ def from_shape(cls, shape: tuple[int, ...]) -> LandmarkType: _MEAN_FACE: dict[LandmarkType, np.ndarray] = { - LandmarkType.LM_2D_4: np.array( + LandmarkType.LM_2D_4: np.asarray( [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]]), # Clockwise from TL - LandmarkType.LM_2D_51: np.array([ + LandmarkType.LM_2D_51: np.asarray([ [0.010086, 0.106454], [0.085135, 0.038915], [0.191003, 0.018748], [0.300643, 0.034489], [0.403270, 0.077391], [0.596729, 0.077391], [0.699356, 0.034489], [0.808997, 0.018748], [0.914864, 0.038915], [0.989913, 0.106454], [0.500000, 0.203352], [0.500000, 0.307009], @@ -66,7 +66,7 @@ def from_shape(cls, shape: tuple[int, ...]) -> LandmarkType: [0.500000, 0.909281], 
[0.433405, 0.902192], [0.347967, 0.864805], [0.300252, 0.784792], [0.437969, 0.778746], [0.500000, 0.785343], [0.562030, 0.778746], [0.699747, 0.784792], [0.563237, 0.824182], [0.500000, 0.831803], [0.436763, 0.824182]]), - LandmarkType.LM_3D_26: np.array([ + LandmarkType.LM_3D_26: np.asarray([ [4.056931, -11.432347, 1.636229], # 8 chin LL [1.833492, -12.542305, 4.061275], # 7 chin L [0.0, -12.901019, 4.070434], # 6 chin C diff --git a/lib/align/detected_face.py b/lib/align/detected_face.py index efd4475ab2..a9374e2aad 100644 --- a/lib/align/detected_face.py +++ b/lib/align/detected_face.py @@ -350,8 +350,8 @@ def from_alignment(self, alignment: AlignmentFileDict, self.height = alignment["h"] landmarks = alignment["landmarks_xy"] if not isinstance(landmarks, np.ndarray): - landmarks = np.array(landmarks, dtype="float32") - self._identity = {T.cast(T.Literal["vggface2"], k): np.array(v, dtype="float32") + landmarks = np.asarray(landmarks, dtype="float32") + self._identity = {T.cast(T.Literal["vggface2"], k): np.asarray(v, dtype="float32") for k, v in alignment.get("identity", {}).items()} self._landmarks_xy = landmarks.copy() @@ -404,7 +404,7 @@ def from_png_meta(self, alignment: PNGHeaderAlignmentsDict) -> None: self.width = alignment["w"] self.top = alignment["y"] self.height = alignment["h"] - self._landmarks_xy = np.array(alignment["landmarks_xy"], dtype="float32") + self._landmarks_xy = np.asarray(alignment["landmarks_xy"], dtype="float32") self.mask = {} for name, mask_dict in alignment["mask"].items(): self.mask[name] = Mask() @@ -412,7 +412,7 @@ def from_png_meta(self, alignment: PNGHeaderAlignmentsDict) -> None: self._identity = {} for key, val in alignment.get("identity", {}).items(): assert key in ["vggface2"] - self._identity[T.cast(T.Literal["vggface2"], key)] = np.array(val, dtype="float32") + self._identity[T.cast(T.Literal["vggface2"], key)] = np.asarray(val, dtype="float32") logger.trace("Created from png exif header: (left: %s, " # type:ignore[attr-defined] "width: %s, top: %s height: %s, landmarks: %s, mask: %s, identity: %s)", self.left, self.width, self.top, self.height, self.landmarks_xy, self.mask, diff --git a/lib/align/pose.py b/lib/align/pose.py index fe186a3f5c..a31c8ffad3 100644 --- a/lib/align/pose.py +++ b/lib/align/pose.py @@ -55,7 +55,7 @@ def xyz_2d(self) -> np.ndarray: """ :class:`numpy.ndarray` projected (x, y) coordinates for each x, y, z point at a constant distance from adjusted center of the skull (0.5, 0.5) in the 2D space. 
""" if self._xyz_2d is None: - xyz = cv2.projectPoints(np.array([[6., 0., -2.3], + xyz = cv2.projectPoints(np.asarray([[6., 0., -2.3], [0., 6., -2.3], [0., 0., 3.7]]).astype("float32"), self._rotation, @@ -119,7 +119,7 @@ def _get_camera_matrix(cls) -> np.ndarray: An estimated camera matrix """ focal_length = 4 - camera_matrix = np.array([[focal_length, 0, 0.5], + camera_matrix = np.asarray([[focal_length, 0, 0.5], [0, focal_length, 0.5], [0, 0, 1]], dtype="double") logger.trace("camera_matrix: %s", camera_matrix) # type:ignore[attr-defined] @@ -145,7 +145,7 @@ def _solve_pnp(self, landmarks: np.ndarray) -> tuple[np.ndarray, np.ndarray]: """ if self._landmarks_type != LandmarkType.LM_2D_68: points: np.ndarray = np.empty([]) - rotation = np.array([[0.0], [0.0], [0.0]]) + rotation = np.asarray([[0.0], [0.0], [0.0]]) translation = rotation.copy() else: points = landmarks[[6, 7, 8, 9, 10, 17, 21, 22, 26, 31, 32, 33, 34, @@ -168,20 +168,20 @@ def _get_offset(self) -> dict[CenteringType, np.ndarray]: :class:`numpy.ndarray` The x, y offset of the new center from the old center. """ - offset: dict[CenteringType, np.ndarray] = {"legacy": np.array([0.0, 0.0])} + offset: dict[CenteringType, np.ndarray] = {"legacy": np.asarray([0.0, 0.0])} if self._landmarks_type != LandmarkType.LM_2D_68: - offset["face"] = np.array([0.0, 0.0]) - offset["head"] = np.array([0.0, 0.0]) + offset["face"] = np.asarray([0.0, 0.0]) + offset["head"] = np.asarray([0.0, 0.0]) else: points: dict[T.Literal["face", "head"], tuple[float, ...]] = {"head": (0.0, 0.0, -2.3), "face": (0.0, -1.5, 4.2)} for key, pnts in points.items(): - center = cv2.projectPoints(np.array([pnts]).astype("float32"), + center = cv2.projectPoints(np.asarray([pnts]).astype("float32"), self._rotation, self._translation, self._camera_matrix, self._distortion_coefficients)[0].squeeze() logger.trace("center %s: %s", key, center) # type:ignore[attr-defined] - offset[key] = center - np.array([0.5, 0.5]) + offset[key] = center - np.asarray([0.5, 0.5]) logger.trace("offset: %s", offset) # type:ignore[attr-defined] return offset diff --git a/lib/align/updater.py b/lib/align/updater.py index 7a98e3c8fc..3f274de3d8 100644 --- a/lib/align/updater.py +++ b/lib/align/updater.py @@ -248,7 +248,7 @@ def update(self) -> int: for alignment in val["faces"]: test = alignment["landmarks_xy"] if not isinstance(test, np.ndarray): - alignment["landmarks_xy"] = np.array(test, dtype="float32") + alignment["landmarks_xy"] = np.asarray(test, dtype="float32") update_count += 1 return update_count diff --git a/lib/convert.py b/lib/convert.py index c96b759213..d4b814986c 100644 --- a/lib/convert.py +++ b/lib/convert.py @@ -239,7 +239,7 @@ def _get_warp_matrix(self, matrix: np.ndarray, size: int) -> np.ndarray: else: mat = matrix * self._face_scale patch_center = (size / 2, size / 2) - mat[..., 2] += (1 - self._face_scale) * np.array(patch_center) + mat[..., 2] += (1 - self._face_scale) * np.asarray(patch_center) return mat @@ -282,7 +282,7 @@ def _patch_image(self, predicted: ConvertItem) -> np.ndarray | list[bytes]: kwargs: dict[str, T.Any] = {} if self.cli_arguments.writer == "patch": kwargs["canvas_size"] = (background.shape[1], background.shape[0]) - kwargs["matrices"] = np.array([self._get_warp_matrix(face.adjusted_matrix, + kwargs["matrices"] = np.asarray([self._get_warp_matrix(face.adjusted_matrix, patched_face.shape[1]) for face in predicted.reference_faces], dtype="float32") @@ -349,7 +349,7 @@ def _get_new_image(self, placeholder = np.zeros((frame_size[1], frame_size[0], 4), 
dtype="float32")
 
         if self._full_frame_output:
-            background = predicted.inbound.image / np.array(255.0, dtype="float32")
+            background = predicted.inbound.image / np.asarray(255.0, dtype="float32")
             placeholder[:, :, :3] = background
         else:
             faces = []  # Collect the faces into final array
@@ -373,7 +373,7 @@ def _get_new_image(self,
                 faces.append(new_face)
 
         if not self._full_frame_output:
-            placeholder = np.array(faces, dtype="float32")
+            placeholder = np.asarray(faces, dtype="float32")
 
         logger.trace("Got filename: '%s'. (placeholders: %s)",  # type: ignore[attr-defined]
                      predicted.inbound.filename, placeholder.shape)
 
diff --git a/lib/gui/analysis/event_reader.py b/lib/gui/analysis/event_reader.py
index 60aa45e6e3..5af64d517b 100644
--- a/lib/gui/analysis/event_reader.py
+++ b/lib/gui/analysis/event_reader.py
@@ -325,7 +325,7 @@ def _to_numpy(self,
                     del loss[idx]
                     del times[idx]
 
-        n_times, n_loss = (np.array(times, dtype="float64"), np.array(loss, dtype="float32"))
+        n_times, n_loss = (np.asarray(times, dtype="float64"), np.asarray(loss, dtype="float32"))
 
         logger.debug("Converted to numpy: (data points: %s, timestamps shape: %s, loss shape: %s)",
                      len(data), n_times.shape, n_loss.shape)
 
@@ -756,7 +756,7 @@ def _get_outputs(cls, model_config: dict[str, T.Any]) -> np.ndarray:
         :class:`numpy.ndarray`
             The layer output names, their instance index and their output index
         """
-        outputs = np.array(model_config["output_layers"])
+        outputs = np.asarray(model_config["output_layers"])
         logger.debug("Obtained model outputs: %s, shape: %s", outputs, outputs.shape)
         if outputs.ndim == 2:  # Insert extra dimension for non learn mask models
             outputs = np.expand_dims(outputs, axis=1)
diff --git a/lib/gui/analysis/stats.py b/lib/gui/analysis/stats.py
index b055cfa332..eab3459ea3 100644
--- a/lib/gui/analysis/stats.py
+++ b/lib/gui/analysis/stats.py
@@ -191,7 +191,7 @@ def get_loss(self, session_id: int | None) -> dict[str, np.ndarray]:
             for key in sorted(loss_dict):
                 for loss_key, loss in loss_dict[key].items():
                     all_loss.setdefault(loss_key, []).extend(loss)
-            retval: dict[str, np.ndarray] = {key: np.array(val, dtype="float32")
+            retval: dict[str, np.ndarray] = {key: np.asarray(val, dtype="float32")
                                              for key, val in all_loss.items()}
         else:
             retval = loss_dict.get(session_id, {})
@@ -671,7 +671,7 @@ def _get_raw(self) -> None:
             if len(iterations) > 1:
                 # Crop all losses to the same number of items
                 if self._iterations == 0:
-                    self._stats = {lossname: np.array([], dtype=loss.dtype)
+                    self._stats = {lossname: np.asarray([], dtype=loss.dtype)
                                    for lossname, loss in self.stats.items()}
                 else:
                     self._stats = {lossname: loss[:self._iterations]
@@ -760,7 +760,7 @@ def _calc_rate_total(cls) -> np.ndarray:
             batchsize = batchsizes[sess_id]
             timestamps = total_timestamps[sess_id]
             rate.extend((batchsize * 2) / np.diff(timestamps))
-        retval = np.array(rate)
+        retval = np.asarray(rate)
         logger.debug("Calculated totals rate: Item_count: %s", len(retval))
         return retval
 
@@ -801,7 +801,7 @@ def _calc_avg(self, data: np.ndarray) -> np.ndarray:
 
         if datapoints <= (self._args["avg_samples"] * 2):
             logger.info("Not enough data to compile rolling average")
-            return np.array([], dtype="float64")
+            return np.asarray([], dtype="float64")
 
         avgs = np.cumsum(np.nan_to_num(data), dtype="float64")
         avgs[window:] = avgs[window:] - avgs[:-window]
@@ -984,7 +984,7 @@ def _ewma_vectorized(self,
             out /= scaling_factors[-2::-1]  # cumulative sums / scaling
 
         if offset != 0:
-            noffset = np.array(offset, copy=False).astype(self._dtype, copy=False)
+            noffset = np.asarray(offset).astype(self._dtype, copy=False)
             out += noffset * scaling_factors[1:]
 
     def _ewma_vectorized_2d(self, data: np.ndarray, out: np.ndarray) -> None:
diff --git a/lib/gui/custom_widgets.py b/lib/gui/custom_widgets.py
index d18d832ed8..0c143e358d 100644
--- a/lib/gui/custom_widgets.py
+++ b/lib/gui/custom_widgets.py
@@ -841,10 +841,10 @@ def __init__(self, title, total):
         self._lbl_title = self._set_title(title)
         self._progress_bar = self._get_progress_bar()
 
-        offset = np.array((self.master.winfo_rootx(), self.master.winfo_rooty()))
+        offset = np.asarray((self.master.winfo_rootx(), self.master.winfo_rooty()))
         # TODO find way to get dimensions of the pop up without it flicking onto the screen
         self.update_idletasks()
-        center = np.array((
+        center = np.asarray((
             (self.master.winfo_width() // 2) - (self.winfo_width() // 2),
             (self.master.winfo_height() // 2) - (self.winfo_height() // 2))) + offset
         self.wm_geometry(f"+{center[0]}+{center[1]}")
diff --git a/lib/gui/utils/image.py b/lib/gui/utils/image.py
index 816ba19257..ab5e54f5a9 100644
--- a/lib/gui/utils/image.py
+++ b/lib/gui/utils/image.py
@@ -272,7 +272,7 @@ def _pad_and_border(self, image: Image.Image, size: int) -> np.ndarray:
             image = new_img
         draw = ImageDraw.Draw(image)
         draw.rectangle(((0, 0), (size, size)), outline="#E5E5E5", width=1)
-        retval = np.array(image)
+        retval = np.asarray(image)
         logger.trace("image shape: %s", retval.shape)  # type: ignore
         return retval
 
@@ -296,7 +296,7 @@ def _process_samples(self,
         bool
             ``True`` if samples succesfully compiled otherwise ``False``
         """
-        asamples = np.array(samples)
+        asamples = np.asarray(samples)
         if not np.any(asamples):
             logger.debug("No preview images collected.")
             return False
@@ -397,7 +397,7 @@ def _create_placeholder(self, thumbnail_size: int) -> None:
         placeholder = Image.new("RGB", (thumbnail_size, thumbnail_size))
         draw = ImageDraw.Draw(placeholder)
         draw.rectangle(((0, 0), (thumbnail_size, thumbnail_size)), outline="#E5E5E5", width=1)
-        placeholder = np.array(placeholder)
+        placeholder = np.asarray(placeholder)
 
         self._placeholder = placeholder
         logger.debug("Created placeholder. shape: %s", placeholder.shape)
diff --git a/lib/image.py b/lib/image.py
index 897d8e2daa..cc659bc848 100644
--- a/lib/image.py
+++ b/lib/image.py
@@ -376,7 +376,7 @@ def read_image_batch(filenames, with_metadata=False):
             else:
                 batch[ret_idx] = future.result()
 
-    batch = np.array(batch)
+    batch = np.asarray(batch)
     retval = (batch, meta) if with_metadata else batch
     logger.trace("Returning images: (filenames: %s, batch shape: %s, with_metadata: %s)",
                  filenames, batch.shape, with_metadata)
diff --git a/lib/keras_utils.py b/lib/keras_utils.py
index af47a3e466..fc9b3b3c96 100644
--- a/lib/keras_utils.py
+++ b/lib/keras_utils.py
@@ -109,7 +109,7 @@ def __init__(self, from_space: str, to_space: str) -> None:
             raise ValueError(f"The color transform {from_space} to {to_space} is not defined.")
         self._func = functions[func_name]
-        self._ref_illuminant = K.constant(np.array([[[0.950428545, 1.000000000, 1.088900371]]]),
+        self._ref_illuminant = K.constant(np.asarray([[[0.950428545, 1.000000000, 1.088900371]]]),
                                           dtype="float32")
         self._inv_ref_illuminant = 1. 
/ self._ref_illuminant @@ -125,7 +125,7 @@ def _get_rgb_xyz_map(cls) -> tuple[Tensor, Tensor]: tuple The mapping and inverse Tensors for rgb to xyz color space conversion """ - mapping = np.array([[10135552 / 24577794, 8788810 / 24577794, 4435075 / 24577794], + mapping = np.asarray([[10135552 / 24577794, 8788810 / 24577794, 4435075 / 24577794], [2613072 / 12288897, 8788810 / 12288897, 887015 / 12288897], [1425312 / 73733382, 8788810 / 73733382, 70074185 / 73733382]]) inverse = np.linalg.inv(mapping) diff --git a/lib/model/autoclip.py b/lib/model/autoclip.py index 826959d66b..7297be7f01 100644 --- a/lib/model/autoclip.py +++ b/lib/model/autoclip.py @@ -64,7 +64,7 @@ def _percentile(self, grad_history: tf.Tensor) -> tf.Tensor: constant_values=1) nan_batch_members = tf.reshape(nan_batch_members, shape=right_rank_matched_shape) - nan = np.array(np.nan, gathered_hist.dtype.as_numpy_dtype) + nan = np.asarray(np.nan, gathered_hist.dtype.as_numpy_dtype) gathered_hist = tf.where(nan_batch_members, nan, gathered_hist) return gathered_hist diff --git a/lib/model/initializers.py b/lib/model/initializers.py index 41f7682371..81f4052c09 100644 --- a/lib/model/initializers.py +++ b/lib/model/initializers.py @@ -281,7 +281,7 @@ def _symmetrize(var_a): """ Make the given tensor symmetrical. """ var_b = np.transpose(var_a, axes=(0, 1, 3, 2)) diag = var_a.diagonal(axis1=2, axis2=3) - var_c = np.array([[np.diag(arr) for arr in batch] for batch in diag]) + var_c = np.asarray([[np.diag(arr) for arr in batch] for batch in diag]) return var_a + var_b - var_c @staticmethod diff --git a/lib/model/losses/feature_loss.py b/lib/model/losses/feature_loss.py index a23060f3e5..a95ffeb887 100644 --- a/lib/model/losses/feature_loss.py +++ b/lib/model/losses/feature_loss.py @@ -300,9 +300,9 @@ def __init__(self, # pylint:disable=too-many-arguments self._use_lpips = lpips self._normalize = normalize self._ret_per_layer = ret_per_layer - self._shift = K.constant(np.array([-.030, -.088, -.188], + self._shift = K.constant(np.asarray([-.030, -.088, -.188], dtype="float32")[None, None, None, :]) - self._scale = K.constant(np.array([.458, .448, .450], + self._scale = K.constant(np.asarray([.458, .448, .450], dtype="float32")[None, None, None, :]) # Loss needs to be done as fp32. 
We could cast at output, but better to update the model diff --git a/lib/model/losses/perceptual_loss.py b/lib/model/losses/perceptual_loss.py index 0fc09b81d7..f35f978c0d 100644 --- a/lib/model/losses/perceptual_loss.py +++ b/lib/model/losses/perceptual_loss.py @@ -214,7 +214,7 @@ def _scharr_edges(cls, image: tf.Tensor, magnitude: bool) -> tf.Tensor: image_shape = K.shape(image) # 5x5 modified Scharr kernel ( reshape to (5,5,1,2) ) - matrix = np.array([[[[0.00070, 0.00070]], + matrix = np.asarray([[[[0.00070, 0.00070]], [[0.00520, 0.00370]], [[0.03700, 0.00000]], [[0.00520, -0.0037]], @@ -540,7 +540,7 @@ def _generate_spatial_filters(self) -> tuple[tf.Tensor, int]: mapping["BY"]["b1"], mapping["BY"]["b2"]) - weights = np.array([self._generate_weights(mapping[channel], domain) + weights = np.asarray([self._generate_weights(mapping[channel], domain) for channel in ("A", "RG", "BY")]) weights = K.constant(np.moveaxis(weights, 0, -1), dtype="float32") diff --git a/lib/training/augmentation.py b/lib/training/augmentation.py index 8fd911969b..750dcd946e 100644 --- a/lib/training/augmentation.py +++ b/lib/training/augmentation.py @@ -89,7 +89,7 @@ def _load_lab(self) -> None: amount_l = int(color_lightness) / 100 amount_ab = int(color_ab) / 100 - self.lab_adjust = np.array([amount_l, amount_ab, amount_ab], dtype="float32") + self.lab_adjust = np.asarray([amount_l, amount_ab, amount_ab], dtype="float32") logger.debug("lab_adjust: %s", self.lab_adjust) def _load_transform(self) -> None: @@ -137,7 +137,7 @@ def _load_warp_to_landmarks(self, batch_size: int) -> None: """ p_mx = self._size - 1 p_hf = (self._size // 2) - 1 - edge_anchors = np.array([(0, 0), (0, p_mx), (p_mx, p_mx), (p_mx, 0), + edge_anchors = np.asarray([(0, 0), (0, p_mx), (p_mx, p_mx), (p_mx, 0), (p_hf, 0), (p_hf, p_mx), (p_mx, p_hf), (0, p_hf)]).astype("int32") edge_anchors = np.broadcast_to(edge_anchors, (batch_size, 8, 2)) grids = np.mgrid[0: p_mx: complex(self._size), # type:ignore[misc] @@ -288,7 +288,7 @@ def transform(self, batch: np.ndarray): tform = np.random.uniform(-self._constants.transform_shift, self._constants.transform_shift, size=(self._batch_size, 2)).astype("float32") - mats = np.array( + mats = np.asarray( [cv2.getRotationMatrix2D((self._processing_size // 2, self._processing_size // 2), rot, scl) @@ -372,10 +372,10 @@ def _random_warp(self, batch: np.ndarray) -> np.ndarray: rands = np.random.normal(size=(self._batch_size, 2, 5, 5), scale=self._warp_scale).astype("float32") batch_maps = ne.evaluate("m + r", local_dict={"m": self._constants.warp_maps, "r": rands}) - batch_interp = np.array([[cv2.resize(map_, self._constants.warp_pad)[slices, slices] + batch_interp = np.asarray([[cv2.resize(map_, self._constants.warp_pad)[slices, slices] for map_ in maps] for maps in batch_maps]) - warped_batch = np.array([cv2.remap(image, interp[0], interp[1], cv2.INTER_LINEAR) + warped_batch = np.asarray([cv2.remap(image, interp[0], interp[1], cv2.INTER_LINEAR) for image, interp in zip(batch, batch_interp)]) logger.trace("Warped image shape: %s", warped_batch.shape) # type:ignore[attr-defined] @@ -425,14 +425,14 @@ def _random_warp_landmarks(self, lbatch_src = [np.delete(src, idxs, axis=0) for idxs, src in zip(rem_indices, batch_src)] lbatch_dst = [np.delete(dst, idxs, axis=0) for idxs, dst in zip(rem_indices, batch_dst)] - grid_z = np.array([griddata(dst, src, (grids[0], grids[1]), method="linear") + grid_z = np.asarray([griddata(dst, src, (grids[0], grids[1]), method="linear") for src, dst in zip(lbatch_src, lbatch_dst)]) maps 
= grid_z.reshape((self._batch_size, self._processing_size, self._processing_size, 2)).astype("float32") - warped_batch = np.array([cv2.remap(image, + warped_batch = np.asarray([cv2.remap(image, map_[..., 1], map_[..., 0], cv2.INTER_LINEAR, diff --git a/lib/training/generator.py b/lib/training/generator.py index 0624246e0a..40e83ecda4 100644 --- a/lib/training/generator.py +++ b/lib/training/generator.py @@ -302,7 +302,7 @@ def _apply_mask(self, detected_faces: list[DetectedFace], batch: np.ndarray) -> if not self._use_mask: return - masks = np.array([face.get_training_masks() for face in detected_faces]) + masks = np.asarray([face.get_training_masks() for face in detected_faces]) batch[..., 3:] = masks logger.trace("side: %s, masks: %s, batch: %s", # type:ignore[attr-defined] @@ -457,7 +457,7 @@ def _create_targets(self, batch: np.ndarray) -> list[np.ndarray]: # Rolling buffer here makes next to no difference, so just create array on the fly retval = [self._to_float32(batch)] else: - retval = [self._to_float32(np.array([cv2.resize(image, + retval = [self._to_float32(np.asarray([cv2.resize(image, (size, size), interpolation=cv2.INTER_AREA) for image in batch])) @@ -523,7 +523,7 @@ def process_batch(self, # TODO Look at potential for applying mask on input # Random Warp if self._warp_to_landmarks: - landmarks = np.array([face.aligned.landmarks for face in detected_faces]) + landmarks = np.asarray([face.aligned.landmarks for face in detected_faces]) batch_dst_pts = self._get_closest_match(filenames, landmarks) warp_kwargs = {"batch_src_points": landmarks, "batch_dst_points": batch_dst_pts} else: @@ -535,7 +535,7 @@ def process_batch(self, **warp_kwargs) if self._model_input_size != self._process_size: - feed = self._to_float32(np.array([cv2.resize(image, + feed = self._to_float32(np.asarray([cv2.resize(image, (self._model_input_size, self._model_input_size), interpolation=cv2.INTER_AREA) @@ -580,7 +580,7 @@ def _get_closest_match(self, filenames: list[str], batch_src_points: np.ndarray) landmarks = {key: lms * scale for key, lms in landmarks.items()} closest_matches = self._cache_closest_matches(filenames, batch_src_points, landmarks) - batch_dst_points = np.array([landmarks[choice(fname)] for fname in closest_matches]) + batch_dst_points = np.asarray([landmarks[choice(fname)] for fname in closest_matches]) logger.trace("Returning: (batch_dst_points: %s)", # type:ignore[attr-defined] batch_dst_points.shape) return batch_dst_points @@ -603,7 +603,7 @@ def _cache_closest_matches(self, """ logger.trace("Caching closest matches") # type:ignore dst_landmarks = list(landmarks.items()) - dst_points = np.array([lm[1] for lm in dst_landmarks]) + dst_points = np.asarray([lm[1] for lm in dst_landmarks]) batch_closest_matches: list[tuple[str, ...]] = [] for filename, src_points in zip(filenames, batch_src_points): @@ -663,7 +663,7 @@ def _create_samples(self, assert self._config["centering"] in T.get_args(CenteringType) retval = np.empty((full_size, full_size, 3), dtype="float32") - retval = self._to_float32(np.array([ + retval = self._to_float32(np.asarray([ AlignedFace(face.landmarks_xy, image=images[idx], centering=T.cast(CenteringType, @@ -726,7 +726,7 @@ def process_batch(self, # resize in these rare instances out_size = max(self._output_sizes) if self._process_size > out_size: - feed = np.array([cv2.resize(img, (out_size, out_size), interpolation=cv2.INTER_AREA) + feed = np.asarray([cv2.resize(img, (out_size, out_size), interpolation=cv2.INTER_AREA) for img in feed]) samples = 
self._create_samples(images, detected_faces) diff --git a/plugins/convert/color/manual_balance.py b/plugins/convert/color/manual_balance.py index 7dc6950bb6..248ac0a9ef 100644 --- a/plugins/convert/color/manual_balance.py +++ b/plugins/convert/color/manual_balance.py @@ -11,7 +11,7 @@ class Color(Adjustment): def process(self, old_face, new_face, raw_mask): image = self.convert_colorspace(new_face * 255.0) - adjustment = np.array([self.config["balance_1"] / 100.0, + adjustment = np.asarray([self.config["balance_1"] / 100.0, self.config["balance_2"] / 100.0, self.config["balance_3"] / 100.0]).astype("float32") for idx in range(3): diff --git a/plugins/convert/mask/mask_blend.py b/plugins/convert/mask/mask_blend.py index 6683bdf10b..9be1abdf07 100644 --- a/plugins/convert/mask/mask_blend.py +++ b/plugins/convert/mask/mask_blend.py @@ -321,6 +321,6 @@ def _get_erosion_kernels(self, mask: np.ndarray) -> list[np.ndarray]: if idx > 1: pos = 0 if idx % 2 == 0 else 1 kernel[pos] = 1 # Set x/y to 1px based on whether eroding top/bottom, left/right - kernels.append(cv2.getStructuringElement(shape, kernel) if size else np.array(0)) + kernels.append(cv2.getStructuringElement(shape, kernel) if size else np.asarray(0)) logger.trace("Erosion kernels: %s", [k.shape for k in kernels]) # type: ignore return kernels diff --git a/plugins/convert/writer/patch.py b/plugins/convert/writer/patch.py index fe4a7f4bb1..dae24b9e0b 100644 --- a/plugins/convert/writer/patch.py +++ b/plugins/convert/writer/patch.py @@ -47,7 +47,7 @@ def __init__(self, output_folder: str, patch_size: int, **kwargs) -> None: self._dummy_patch = np.zeros((1, patch_size, patch_size, 4), dtype=np.float32) - tl_box = np.array([[0, 0], [patch_size, 0], [patch_size, patch_size], [0, patch_size]], + tl_box = np.asarray([[0, 0], [patch_size, 0], [patch_size, patch_size], [0, patch_size]], dtype=np.float32) self._patch_corner = {"top-left": tl_box[0], "top-right": tl_box[1], @@ -169,9 +169,9 @@ def _get_inverse_matrices(cls, matrices: np.ndarray) -> np.ndarray: The inverse transformation matrices """ if not np.any(matrices): - return np.array([[[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]], dtype=np.float32) + return np.asarray([[[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]], dtype=np.float32) - identity = np.array([[[0., 0., 1.]]], dtype=np.float32) + identity = np.asarray([[[0., 0., 1.]]], dtype=np.float32) mat = np.concatenate([matrices, np.repeat(identity, matrices.shape[0], axis=0)], axis=1) retval = np.linalg.inv(mat) logger.trace("matrix: %s, inverse: %s", mat, retval) # type:ignore[attr-defined] @@ -220,7 +220,7 @@ def _get_roi(self, matrices: np.ndarray) -> np.ndarray: """ retval = [cv2.transform(np.expand_dims(self._box, axis=1), mat[:2, ...]).squeeze() for mat in matrices] - return np.array(retval, dtype=np.float32) + return np.asarray(retval, dtype=np.float32) def pre_encode(self, image: np.ndarray, **kwargs) -> list[list[bytes]]: """ Pre_encode the image in lib/convert.py threads as it is a LOT quicker. 
@@ -245,7 +245,7 @@ def pre_encode(self, image: np.ndarray, **kwargs) -> list[list[bytes]]: logger.trace("Pre-encoding image") # type:ignore[attr-defined] retval = [] canvas_size: tuple[int, int] = kwargs.get("canvas_size", (1, 1)) - matrices: np.ndarray = kwargs.get("matrices", np.array([])) + matrices: np.ndarray = kwargs.get("matrices", np.asarray([])) if not np.any(image) and self.config["empty_frames"] == "blank": image = self._dummy_patch diff --git a/plugins/extract/_base.py b/plugins/extract/_base.py index 588394adee..e551cc8132 100644 --- a/plugins/extract/_base.py +++ b/plugins/extract/_base.py @@ -82,8 +82,8 @@ class ExtractorBatch: image: list[np.ndarray] = field(default_factory=list) detected_faces: Sequence[DetectedFace | list[DetectedFace]] = field(default_factory=list) filename: list[str] = field(default_factory=list) - feed: np.ndarray = np.array([]) - prediction: np.ndarray = np.array([]) + feed: np.ndarray = np.asarray([]) + prediction: np.ndarray = np.asarray([]) data: list[dict[str, T.Any]] = field(default_factory=list) def __repr__(self) -> str: diff --git a/plugins/extract/align/_base/aligner.py b/plugins/extract/align/_base/aligner.py index 6746daf631..77df423e03 100644 --- a/plugins/extract/align/_base/aligner.py +++ b/plugins/extract/align/_base/aligner.py @@ -76,10 +76,10 @@ class AlignerBatch(ExtractorBatch): """ batch_id: int = 0 detected_faces: list[DetectedFace] = field(default_factory=list) - landmarks: np.ndarray = np.array([]) + landmarks: np.ndarray = np.asarray([]) refeeds: list[np.ndarray] = field(default_factory=list) second_pass: bool = False - second_pass_masks: np.ndarray = np.array([]) + second_pass_masks: np.ndarray = np.asarray([]) def __repr__(self): """ Prettier repr for debug printing """ @@ -360,7 +360,7 @@ def finalize(self, batch: BatchType) -> Generator[ExtractMedia, None, None]: return for face, landmarks in zip(batch.detected_faces, batch.landmarks): if not isinstance(landmarks, np.ndarray): - landmarks = np.array(landmarks) + landmarks = np.asarray(landmarks) face.add_landmarks_xy(landmarks) logger.trace("Item out: %s", batch) # type: ignore[attr-defined] @@ -431,7 +431,7 @@ def _process_input_first_pass(self, batch: AlignerBatch) -> None: batch: :class:`AlignerBatch` Contains the batch that is currently being passed through the plugin process """ - original_boxes = np.array([(face.left, face.top, face.width, face.height) + original_boxes = np.asarray([(face.left, face.top, face.width, face.height) for face in batch.detected_faces]) adjusted_boxes = self._get_adjusted_boxes(original_boxes) @@ -532,7 +532,7 @@ def _predict(self, batch: BatchType) -> AlignerBatch: try: preds = [self.predict(feed) for feed in batch.refeeds] try: - batch.prediction = np.array(preds) + batch.prediction = np.asarray(preds) except ValueError as err: # If refeed batches are different sizes, Numpy will error, so we need to explicitly # set the dtype to 'object' rather than let it infer @@ -544,7 +544,7 @@ def _predict(self, batch: BatchType) -> AlignerBatch: logger.trace( # type:ignore[attr-defined] "Mismatched array sizes, setting dtype to object: %s", [p.shape for p in preds]) - batch.prediction = np.array(preds, dtype="object") + batch.prediction = np.asarray(preds, dtype="object") else: raise @@ -586,7 +586,7 @@ def _process_refeeds(self, batch: AlignerBatch) -> list[AlignerBatch]: data = batch.data[selected_idx] if batch.data else {} selected_idx += 1 else: # All resuts have been filtered out - feed = pred = np.array([]) + feed = pred = np.asarray([]) data 
= {} subbatch = AlignerBatch(batch_id=batch.batch_id, @@ -698,7 +698,7 @@ def _process_output_first_pass(self, subbatches: list[AlignerBatch]) -> tuple[np should be filtered out prior to further processing """ masks = self._get_refeed_filter_masks(subbatches) - all_landmarks = np.array([sub.landmarks for sub in subbatches]) + all_landmarks = np.asarray([sub.landmarks for sub in subbatches]) # re-align not selected or not filtering the re-feeds if not self._re_align.do_refeeds: @@ -722,7 +722,7 @@ def _process_output_second_pass(self, """ self._re_align.process_output(subbatches, masks) masks = self._get_refeed_filter_masks(subbatches, original_masks=masks) - all_landmarks = np.array([sub.landmarks for sub in subbatches]) + all_landmarks = np.asarray([sub.landmarks for sub in subbatches]) return self._get_mean_landmarks(all_landmarks, masks) def _process_output(self, batch: BatchType) -> AlignerBatch: @@ -771,7 +771,7 @@ def _normalize_faces(self, faces: np.ndarray) -> np.ndarray: return faces logger.trace("Normalizing faces") # type: ignore[attr-defined] meth = getattr(self, f"_normalize_{self._normalize_method.lower()}") - faces = np.array([meth(face) for face in faces]) + faces = np.asarray([meth(face) for face in faces]) logger.trace("Normalized faces") # type: ignore[attr-defined] return faces diff --git a/plugins/extract/align/_base/processing.py b/plugins/extract/align/_base/processing.py index efdeec9468..15e7a5c733 100644 --- a/plugins/extract/align/_base/processing.py +++ b/plugins/extract/align/_base/processing.py @@ -352,8 +352,8 @@ def add_batch(self, batch: AlignerBatch) -> None: with self._queue_lock: logger.trace("Queueing for second pass: %s", batch) # type: ignore[attr-defined] batch.second_pass = True - batch.feed = np.array([]) - batch.prediction = np.array([]) + batch.feed = np.asarray([]) + batch.prediction = np.asarray([]) batch.refeeds = [] batch.data = [] self._queued.append(batch) @@ -397,7 +397,7 @@ def process_batch(self, batch: AlignerBatch) -> list[np.ndarray]: centering=self._centering) for image, lms, msk in zip(batch.image, landmarks, masks) if not msk] - faces = np.array([aligned.face for aligned in aligned_faces + faces = np.asarray([aligned.face for aligned in aligned_faces if aligned.face is not None]) retval.append(faces) batch.data.append({"aligned_faces": aligned_faces}) @@ -408,7 +408,7 @@ def process_batch(self, batch: AlignerBatch) -> list[np.ndarray]: with self._tracked_lock: self._tracked_batchs[batch.batch_id] = {"filtered_landmarks": filtered_landmarks} - batch.landmarks = np.array([]) # Clear the old landmarks + batch.landmarks = np.asarray([]) # Clear the old landmarks return retval def _transform_to_frame(self, batch: AlignerBatch) -> np.ndarray: @@ -427,7 +427,7 @@ def _transform_to_frame(self, batch: AlignerBatch) -> np.ndarray: The landmarks transformed to frame space """ faces: list[AlignedFace] = batch.data[0]["aligned_faces"] - retval = np.array([aligned.transform_points(landmarks, invert=True) + retval = np.asarray([aligned.transform_points(landmarks, invert=True) for landmarks, aligned in zip(batch.landmarks, faces)]) logger.trace("Transformed points: original max: %s, " # type: ignore[attr-defined] "new max: %s", batch.landmarks.max(), retval.max()) diff --git a/plugins/extract/align/cv2_dnn.py b/plugins/extract/align/cv2_dnn.py index f646b7cdc2..6af968df1b 100644 --- a/plugins/extract/align/cv2_dnn.py +++ b/plugins/extract/align/cv2_dnn.py @@ -90,7 +90,7 @@ def process_input(self, batch: BatchType) -> None: """ assert 
isinstance(batch, AlignerBatch) lfaces, roi, offsets = self.align_image(batch) - batch.feed = np.array(lfaces)[..., :3] + batch.feed = np.asarray(lfaces)[..., :3] batch.data.append({"roi": roi, "offsets": offsets}) def _get_box_and_offset(self, face: DetectedFace) -> tuple[list[int], int]: @@ -312,5 +312,5 @@ def get_pts_from_predict(self, batch: AlignerBatch): points[:, 0] += (roi[0] - offset[0]) points[:, 1] += (roi[1] - offset[1]) landmarks.append(points) - batch.landmarks = np.array(landmarks) + batch.landmarks = np.asarray(landmarks) logger.trace("Predicted Landmarks: %s", batch.landmarks) # type:ignore[attr-defined] diff --git a/plugins/extract/align/external.py b/plugins/extract/align/external.py index 929e9b11c9..27918d3dae 100644 --- a/plugins/extract/align/external.py +++ b/plugins/extract/align/external.py @@ -118,7 +118,7 @@ def _import_face(self, face: dict[str, list[int] | list[list[float]]]) -> np.nda if len(landmarks) not in (4, 68): raise FaceswapError("Imported 'landmarks_2d' should be either 68 facial feature " "landmarks or 4 ROI corner locations") - retval = np.array(landmarks, dtype="float32") + retval = np.asarray(landmarks, dtype="float32") if retval.shape[-1] != 2: raise FaceswapError("Imported 'landmarks_2d' should be formatted as a list of (x, y) " "co-ordinates") @@ -140,7 +140,7 @@ def import_data(self, data: dict[str, list[dict[str, list[int] | list[list[float self._check_for_video(list(data)[0]) for key, faces in data.items(): try: - lms = np.array([self._import_face(face) for face in faces], dtype="float32") + lms = np.asarray([self._import_face(face) for face in faces], dtype="float32") if not np.any(lms): logger.trace("Skipping frame '%s' with no faces") # type:ignore[attr-defined] continue @@ -167,7 +167,7 @@ def process_input(self, batch: BatchType) -> None: batch: :class:`~plugins.extract.detect._base.AlignerBatch` The batch to be processed by the plugin """ - batch.feed = np.array([(self._get_key(os.path.basename(f)), i.shape[:2]) + batch.feed = np.asarray([(self._get_key(os.path.basename(f)), i.shape[:2]) for f, i in zip(batch.filename, batch.image)], dtype="object") def faces_to_feed(self, faces: np.ndarray) -> np.ndarray: @@ -241,7 +241,7 @@ def predict(self, feed: np.ndarray) -> np.ndarray: else: self._imported[key] = (remaining - 1, all_lms) - return np.array(preds, dtype="float32") + return np.asarray(preds, dtype="float32") def process_output(self, batch: BatchType) -> None: """ Process the imported data to the landmarks attribute diff --git a/plugins/extract/align/fan.py b/plugins/extract/align/fan.py index a829f3bcac..255873e1c4 100644 --- a/plugins/extract/align/fan.py +++ b/plugins/extract/align/fan.py @@ -76,7 +76,7 @@ def process_input(self, batch: BatchType) -> None: assert isinstance(batch, AlignerBatch) logger.trace("Aligning faces around center") # type:ignore[attr-defined] center_scale = self.get_center_scale(batch.detected_faces) - batch.feed = np.array(self.crop(batch, center_scale))[..., :3] + batch.feed = np.asarray(self.crop(batch, center_scale))[..., :3] batch.data.append({"center_scale": center_scale}) logger.trace("Aligned image around center") # type:ignore[attr-defined] @@ -254,7 +254,7 @@ def get_pts_from_predict(self, batch: AlignerBatch) -> None: resolution = np.full((num_images, num_landmarks), 64, dtype='int32') subpixel_landmarks = np.ones((num_images, num_landmarks, 3), dtype='float32') - indices = np.array(np.unravel_index(batch.prediction.reshape(num_images, + indices = 
np.asarray(np.unravel_index(batch.prediction.reshape(num_images, num_landmarks, -1).argmax(-1), (batch.prediction.shape[2], # height diff --git a/plugins/extract/detect/_base.py b/plugins/extract/detect/_base.py index 3c3221d84f..6edf943808 100644 --- a/plugins/extract/detect/_base.py +++ b/plugins/extract/detect/_base.py @@ -60,7 +60,7 @@ class DetectorBatch(ExtractorBatch): rotation_matrix: list[np.ndarray] = field(default_factory=list) scale: list[float] = field(default_factory=list) pad: list[tuple[int, int]] = field(default_factory=list) - initial_feed: np.ndarray = np.array([]) + initial_feed: np.ndarray = np.asarray([]) def __repr__(self): """ Prettier repr for debug printing """ @@ -282,8 +282,8 @@ def _to_detected_face(left: float, top: float, right: float, bottom: float) -> D def _predict(self, batch: BatchType) -> DetectorBatch: """ Wrap models predict function in rotations """ assert isinstance(batch, DetectorBatch) - batch.rotation_matrix = [np.array([]) for _ in range(len(batch.feed))] - found_faces: list[np.ndarray] = [np.array([]) for _ in range(len(batch.feed))] + batch.rotation_matrix = [np.asarray([]) for _ in range(len(batch.feed))] + found_faces: list[np.ndarray] = [np.asarray([]) for _ in range(len(batch.feed))] for angle in self.rotation: # Rotate the batch and insert placeholders for already found faces self._rotate_batch(batch, angle) @@ -293,7 +293,7 @@ def _predict(self, batch: BatchType) -> DetectorBatch: batch.prediction = pred else: try: - batch.prediction = np.array([b if b.any() else p + batch.prediction = np.asarray([b if b.any() else p for b, p in zip(batch.prediction, pred)]) except ValueError as err: # If batches are different sizes after rotation Numpy will error, so we @@ -303,7 +303,7 @@ def _predict(self, batch: BatchType) -> DetectorBatch: # has an inhomogeneous shape after 1 dimensions. 
The detected shape was # (8,) + inhomogeneous part if "inhomogeneous" in str(err): - batch.prediction = np.array([b if b.any() else p + batch.prediction = np.asarray([b if b.any() else p for b, p in zip(batch.prediction, pred)], dtype="object") logger.trace( # type:ignore[attr-defined] @@ -338,7 +338,7 @@ def _predict(self, batch: BatchType) -> DetectorBatch: logger.trace("Faces found for all images") # type:ignore[attr-defined] break - batch.prediction = np.array(found_faces, dtype="object") + batch.prediction = np.asarray(found_faces, dtype="object") logger.trace("detect_prediction output: (filenames: %s, " # type:ignore[attr-defined] "prediction: %s, rotmat: %s)", batch.filename, batch.prediction, batch.rotation_matrix) @@ -585,7 +585,7 @@ def _rotate_batch(self, batch: DetectorBatch, angle: int) -> None: image, matrix = self._rotate_image_by_angle(img, angle) feeds.append(image) rotmats.append(matrix) - batch.feed = np.array(feeds, dtype="float32") + batch.feed = np.asarray(feeds, dtype="float32") batch.rotation_matrix = rotmats @staticmethod @@ -613,7 +613,7 @@ def _rotate_face(face: DetectedFace, rotation_matrix: np.ndarray) -> DetectedFac [face.left, face.bottom]] rotation_matrix = cv2.invertAffineTransform(rotation_matrix) - points = np.array(bounding_box, "int32") + points = np.asarray(bounding_box, "int32") points = np.expand_dims(points, axis=0) transformed = cv2.transform(points, rotation_matrix).astype("int32") rotated = transformed.squeeze() diff --git a/plugins/extract/detect/cv2_dnn.py b/plugins/extract/detect/cv2_dnn.py index 9f98918e06..90ad26a23f 100644 --- a/plugins/extract/detect/cv2_dnn.py +++ b/plugins/extract/detect/cv2_dnn.py @@ -60,7 +60,7 @@ def finalize_predictions(self, predictions: np.ndarray) -> np.ndarray: (predictions[0, 0, i, 5] * self.input_size), (predictions[0, 0, i, 6] * self.input_size)]) logger.trace("faces: %s", faces) # type:ignore[attr-defined] - return np.array(faces)[None, ...] + return np.asarray(faces)[None, ...] 
def process_output(self, batch: BatchType) -> None: """ Compile found faces for output """ diff --git a/plugins/extract/detect/external.py b/plugins/extract/detect/external.py index 98876e2a95..fed45b2d95 100644 --- a/plugins/extract/detect/external.py +++ b/plugins/extract/detect/external.py @@ -66,7 +66,7 @@ def _compile_detection_image(self, item: ExtractMedia pad: int The amount of padding applied to the image (0, 0) """ - return np.array(item.image_shape[:2], dtype="int64"), 1.0, (0, 0) + return np.asarray(item.image_shape[:2], dtype="int64"), 1.0, (0, 0) def _check_for_video(self, filename: str) -> None: """ Check a sample filename from the import file for a file extension to set @@ -155,7 +155,7 @@ def _validate_landmarks(self, landmarks: list[list[float]]) -> np.ndarray: if len(landmarks) not in (4, 68): raise FaceswapError("Imported 'landmarks_2d' should be either 68 facial feature " "landmarks or 4 ROI corner locations") - retval = np.array(landmarks, dtype="float32") + retval = np.asarray(landmarks, dtype="float32") if retval.shape[-1] != 2: raise FaceswapError("Imported 'landmarks_2d' should be formatted as a list of (x, y) " "co-ordinates") @@ -245,7 +245,7 @@ def import_data(self, for key, faces in data.items(): try: store_key = self._get_key(key) - self._imported[store_key] = np.array([self._import_frame_face(face, align_origin) + self._imported[store_key] = np.asarray([self._import_frame_face(face, align_origin) for face in faces], dtype="int32") except FaceswapError as err: logger.error(str(err)) @@ -260,7 +260,7 @@ def process_input(self, batch: BatchType) -> None: batch: :class:`~plugins.extract.detect._base.DetectorBatch` The batch to be processed by the plugin """ - batch.feed = np.array([(self._get_key(os.path.basename(f)), i) + batch.feed = np.asarray([(self._get_key(os.path.basename(f)), i) for f, i in zip(batch.filename, batch.image)], dtype="object") def _adjust_for_origin(self, box: np.ndarray, frame_dims: tuple[int, int]) -> np.ndarray: @@ -301,7 +301,7 @@ def predict(self, feed: np.ndarray) -> list[np.ndarray]: # type:ignore[override The bounding boxes for the given filenames """ self._missing.extend(f[0] for f in feed if f[0] not in self._imported) - return [self._adjust_for_origin(self._imported.pop(f[0], np.array([], dtype="int32")), + return [self._adjust_for_origin(self._imported.pop(f[0], np.asarray([], dtype="int32")), f[1]) for f in feed] diff --git a/plugins/extract/detect/mtcnn.py b/plugins/extract/detect/mtcnn.py index 78859ca249..be6b63b8cb 100644 --- a/plugins/extract/detect/mtcnn.py +++ b/plugins/extract/detect/mtcnn.py @@ -76,7 +76,7 @@ def process_input(self, batch: BatchType) -> None: batch: :class:`~plugins.extract.detect._base.DetectorBatch` Contains the batch that is currently being passed through the plugin process """ - batch.feed = (np.array(batch.image, dtype="float32") - 127.5) / 127.5 + batch.feed = (np.asarray(batch.image, dtype="float32") - 127.5) / 127.5 def predict(self, feed: np.ndarray) -> np.ndarray: """ Run model to get predictions @@ -271,7 +271,7 @@ def __call__(self, images: np.ndarray) -> list[np.ndarray]: rectangles[idx].extend(rect) scores[idx].extend(score) - return [nms(np.array(rect), np.array(score), 0.7, "iou")[0] # don't output scores + return [nms(np.asarray(rect), np.asarray(score), 0.7, "iou")[0] # don't output scores for rect, score in zip(rectangles, scores)] def _detect_face_12net(self, @@ -300,14 +300,14 @@ def _detect_face_12net(self, in_side = 2 * size + 11 stride = 0. 
if size == 1 else float(in_side - 12) / (size - 1) (var_x, var_y) = np.nonzero(class_probabilities >= self._threshold) - boundingbox = np.array([var_x, var_y]).T + boundingbox = np.asarray([var_x, var_y]).T boundingbox = np.concatenate((np.fix((stride * (boundingbox) + 0) * scale), np.fix((stride * (boundingbox) + 11) * scale)), axis=1) offset = roi[:4, var_x, var_y].T boundingbox = boundingbox + offset * 12.0 * scale rectangles = np.concatenate((boundingbox, - np.array([class_probabilities[var_x, var_y]]).T), axis=1) + np.asarray([class_probabilities[var_x, var_y]]).T), axis=1) rectangles = rect2square(rectangles) np.clip(rectangles[..., :4], 0., self._input_size, out=rectangles[..., :4]) @@ -402,7 +402,7 @@ def __call__(self, ret: list[np.ndarray] = [] for idx, (rectangles, image) in enumerate(zip(rectangle_batch, images)): if not np.any(rectangles): - ret.append(np.array([])) + ret.append(np.asarray([])) continue feed_batch = np.empty((rectangles.shape[0], 24, 24, 3), dtype="float32") @@ -441,7 +441,7 @@ def _filter_face_24net(self, pick = np.nonzero(prob >= self._threshold) bbox = rectangles.T[:4, pick] - scores = np.array([prob[pick]]).T.ravel() + scores = np.asarray([prob[pick]]).T.ravel() deltas = roi.T[:4, pick] dims = np.tile([bbox[2] - bbox[0], bbox[3] - bbox[1]], (2, 1, 1)) @@ -575,10 +575,10 @@ def _filter_face_48net(self, class_probabilities: np.ndarray, """ prob = class_probabilities[:, 1] pick = np.nonzero(prob >= self._threshold)[0] - scores = np.array([prob[pick]]).T.ravel() + scores = np.asarray([prob[pick]]).T.ravel() bbox = rectangles[pick] - dims = np.array([bbox[..., 2] - bbox[..., 0], bbox[..., 3] - bbox[..., 1]]).T + dims = np.asarray([bbox[..., 2] - bbox[..., 0], bbox[..., 3] - bbox[..., 1]]).T pts = np.vstack( np.hsplit(points[pick], 2)).reshape(2, -1, 5).transpose(1, 2, 0).reshape(-1, 10) @@ -678,7 +678,7 @@ def detect_faces(self, batch: np.ndarray) -> tuple[np.ndarray, tuple[np.ndarray] rectangles = self._rnet(batch, rectangles) ret_boxes, ret_points = zip(*self._onet(batch, rectangles)) - return np.array(ret_boxes, dtype="object"), ret_points + return np.asarray(ret_boxes, dtype="object"), ret_points def nms(rectangles: np.ndarray, diff --git a/plugins/extract/detect/s3fd.py b/plugins/extract/detect/s3fd.py index 89d538b76f..a7c6145b6d 100644 --- a/plugins/extract/detect/s3fd.py +++ b/plugins/extract/detect/s3fd.py @@ -54,7 +54,7 @@ def init_model(self) -> None: def process_input(self, batch: BatchType) -> None: """ Compile the detection image(s) for prediction """ assert isinstance(self.model, S3fd) - batch.feed = self.model.prepare_batch(np.array(batch.image)) + batch.feed = self.model.prepare_batch(np.asarray(batch.image)) def predict(self, feed: np.ndarray) -> np.ndarray: """ Run model to get predictions """ @@ -243,7 +243,7 @@ def __init__(self, self.define_model(self.model_definition) self.load_model_weights() self.confidence = confidence - self.average_img = np.array([104.0, 117.0, 123.0]) + self.average_img = np.asarray([104.0, 117.0, 123.0]) logger.debug("Initialized: %s", self.__class__.__name__) def model_definition(self) -> tuple[list[Tensor], list[Tensor]]: @@ -411,7 +411,7 @@ def finalize_predictions(self, bounding_boxes_scales: list[np.ndarray]) -> np.nd boxes = self._post_process(bboxlist) finallist = self._nms(boxes, 0.5) ret.append(finallist) - return np.array(ret, dtype="object") + return np.asarray(ret, dtype="object") def _post_process(self, bboxlist: list[np.ndarray]) -> np.ndarray: """ Perform post processing on output @@ -429,11 +429,11 
+429,11 @@ def _post_process(self, bboxlist: list[np.ndarray]) -> np.ndarray:
                     score = ocls[0, hindex, windex, 1]
                     if score >= self.confidence:
                         loc = np.ascontiguousarray(oreg[0, hindex, windex, :]).reshape((1, 4))
-                        priors = np.array([[axc / 1.0, ayc / 1.0, stride * 4 / 1.0, stride * 4 / 1.0]])
+                        priors = np.asarray([[axc / 1.0, ayc / 1.0, stride * 4 / 1.0, stride * 4 / 1.0]])
                         box = self.decode(loc, priors)
                         x_1, y_1, x_2, y_2 = box[0] * 1.0
                         retval.append([x_1, y_1, x_2, y_2, score])
-        return_numpy = np.array(retval) if len(retval) != 0 else np.zeros((1, 5))
+        return_numpy = np.asarray(retval) if len(retval) != 0 else np.zeros((1, 5))
         return return_numpy
 
     @staticmethod
diff --git a/plugins/extract/mask/bisenet_fp.py b/plugins/extract/mask/bisenet_fp.py
index cf8a177fe6..a30124dd3e 100644
--- a/plugins/extract/mask/bisenet_fp.py
+++ b/plugins/extract/mask/bisenet_fp.py
@@ -124,7 +124,7 @@ def process_input(self, batch: BatchType) -> None:
         mean = (0.384, 0.314, 0.279) if self._is_faceswap else (0.485, 0.456, 0.406)
         std = (0.324, 0.286, 0.275) if self._is_faceswap else (0.229, 0.224, 0.225)
 
-        batch.feed = ((np.array([T.cast(np.ndarray, feed.face)[..., :3]
+        batch.feed = ((np.asarray([T.cast(np.ndarray, feed.face)[..., :3]
                                  for feed in batch.feed_faces],
                                 dtype="float32") / 255.0) - mean) / std
         logger.trace("feed shape: %s", batch.feed.shape)  # type:ignore
diff --git a/plugins/extract/mask/components.py b/plugins/extract/mask/components.py
index 0a71af4866..ecc4d600e1 100644
--- a/plugins/extract/mask/components.py
+++ b/plugins/extract/mask/components.py
@@ -47,7 +47,7 @@ def predict(self, feed: np.ndarray) -> np.ndarray:
                 # Called from the manual tool. # TODO This will only work with BS1
                 feed = np.zeros_like(feed)
                 continue
-            parts = self.parse_parts(np.array(face.landmarks))
+            parts = self.parse_parts(np.asarray(face.landmarks))
             for item in parts:
                 a_item = np.rint(np.concatenate(item)).astype("int32")
                 hull = cv2.convexHull(a_item)
diff --git a/plugins/extract/mask/extended.py b/plugins/extract/mask/extended.py
index d6970cb0e5..4372f44f31 100644
--- a/plugins/extract/mask/extended.py
+++ b/plugins/extract/mask/extended.py
@@ -47,7 +47,7 @@ def predict(self, feed: np.ndarray) -> np.ndarray:
                 # Called from the manual tool. 
# TODO This will only work with BS1 feed = np.zeros_like(feed) continue - parts = self.parse_parts(np.array(face.landmarks)) + parts = self.parse_parts(np.asarray(face.landmarks)) for item in parts: a_item = np.rint(np.concatenate(item)).astype("int32") hull = cv2.convexHull(a_item) @@ -76,8 +76,8 @@ def _adjust_mask_top(cls, landmarks: np.ndarray) -> None: qr_pnt = (landmarks[45] + mr_pnt) // 2 # Top of the eye arrays - bot_l = np.array((ql_pnt, landmarks[36], landmarks[37], landmarks[38], landmarks[39])) - bot_r = np.array((landmarks[42], landmarks[43], landmarks[44], landmarks[45], qr_pnt)) + bot_l = np.asarray((ql_pnt, landmarks[36], landmarks[37], landmarks[38], landmarks[39])) + bot_r = np.asarray((landmarks[42], landmarks[43], landmarks[44], landmarks[45], qr_pnt)) # Eyebrow arrays top_l = landmarks[17:22] diff --git a/plugins/extract/mask/unet_dfl.py b/plugins/extract/mask/unet_dfl.py index 4ca2f3dc07..93636da4e5 100644 --- a/plugins/extract/mask/unet_dfl.py +++ b/plugins/extract/mask/unet_dfl.py @@ -52,7 +52,7 @@ def init_model(self) -> None: def process_input(self, batch: BatchType) -> None: """ Compile the detected faces for prediction """ assert isinstance(batch, MaskerBatch) - batch.feed = np.array([T.cast(np.ndarray, feed.face)[..., :3] + batch.feed = np.asarray([T.cast(np.ndarray, feed.face)[..., :3] for feed in batch.feed_faces], dtype="float32") / 255.0 logger.trace("feed shape: %s", batch.feed.shape) # type: ignore diff --git a/plugins/extract/mask/vgg_clear.py b/plugins/extract/mask/vgg_clear.py index 50165f8015..5c25cce941 100644 --- a/plugins/extract/mask/vgg_clear.py +++ b/plugins/extract/mask/vgg_clear.py @@ -47,7 +47,7 @@ def init_model(self) -> None: def process_input(self, batch: BatchType) -> None: """ Compile the detected faces for prediction """ assert isinstance(batch, MaskerBatch) - input_ = np.array([T.cast(np.ndarray, feed.face)[..., :3] + input_ = np.asarray([T.cast(np.ndarray, feed.face)[..., :3] for feed in batch.feed_faces], dtype="float32") batch.feed = input_ - np.mean(input_, axis=(1, 2))[:, None, None, :] logger.trace("feed shape: %s", batch.feed.shape) # type: ignore diff --git a/plugins/extract/recognition/_base.py b/plugins/extract/recognition/_base.py index 61662e623b..e34fd795ac 100644 --- a/plugins/extract/recognition/_base.py +++ b/plugins/extract/recognition/_base.py @@ -388,7 +388,7 @@ def _get_matches(self, """ encodings = self._filter if filter_type == "filter" else self._nfilter assert encodings is not None - distances = np.array([self._find_cosine_similiarity(encodings, identity) + distances = np.asarray([self._find_cosine_similiarity(encodings, identity) for identity in identities]) is_match = np.any(distances >= self._threshold, axis=-1) # Invert for filter (set the `True` match to `False` for should filter) @@ -462,7 +462,7 @@ def __call__(self, if not self._active: return faces - identities = np.array([face.identity["vggface2"] for face, fldr in zip(faces, sub_folders) + identities = np.asarray([face.identity["vggface2"] for face, fldr in zip(faces, sub_folders) if fldr is None]) logger.trace("face_count: %s, already_filtered: %s, identity_shape: %s", # type: ignore len(faces), sum(x is not None for x in sub_folders), identities.shape) @@ -478,7 +478,7 @@ def __call__(self, should_filter.append(self._get_matches(f_type, identities)) # If any of the filter or nfilter evaluate to 'should filter' then filter out face - final_filter: list[bool] = np.array(should_filter).max(axis=0).tolist() + final_filter: list[bool] = 
np.asarray(should_filter).max(axis=0).tolist() logger.trace("should_filter: %s, final_filter: %s", # type: ignore should_filter, final_filter) return self._filter_faces(faces, sub_folders, final_filter) diff --git a/plugins/extract/recognition/vgg_face2.py b/plugins/extract/recognition/vgg_face2.py index acf268bfd4..669e6b41bd 100644 --- a/plugins/extract/recognition/vgg_face2.py +++ b/plugins/extract/recognition/vgg_face2.py @@ -53,7 +53,7 @@ def __init__(self, *args, **kwargs) -> None: # pylint:disable=unused-argument self.batchsize = self.config["batch-size"] # Average image provided in https://github.com/ox-vgg/vgg_face2 - self._average_img = np.array([91.4953, 103.8827, 131.0912]) + self._average_img = np.asarray([91.4953, 103.8827, 131.0912]) logger.debug("Initialized %s", self.__class__.__name__) # <<< GET MODEL >>> # @@ -72,7 +72,7 @@ def init_model(self) -> None: def process_input(self, batch: BatchType) -> None: """ Compile the detected faces for prediction """ assert isinstance(batch, RecogBatch) - batch.feed = np.array([T.cast(np.ndarray, feed.face)[..., :3] + batch.feed = np.asarray([T.cast(np.ndarray, feed.face)[..., :3] for feed in batch.feed_faces], dtype="float32") - self._average_img logger.trace("feed shape: %s", batch.feed.shape) # type:ignore diff --git a/plugins/train/model/_base/model.py b/plugins/train/model/_base/model.py index 5bc1161b17..7543913838 100644 --- a/plugins/train/model/_base/model.py +++ b/plugins/train/model/_base/model.py @@ -818,7 +818,7 @@ def _get_nodes(self, nodes: np.ndarray) -> list[tuple[str, int]]: list The (node name, output index) for each node passed in """ - anodes = np.array(nodes, dtype="object")[..., :3] + anodes = np.asarray(nodes, dtype="object")[..., :3] num_layers = anodes.shape[0] anodes = anodes[self._output_idx] if num_layers == 2 else anodes[0] @@ -891,7 +891,7 @@ def _get_filtered_structure(self) -> OrderedDict: The layer name as key with the input name and output index as value. """ # Filter output layer - out = np.array(self._config["output_layers"], dtype="object") + out = np.asarray(self._config["output_layers"], dtype="object") if out.ndim == 2: out = np.expand_dims(out, axis=1) # Needs to be expanded for _get_nodes outputs = self._get_nodes(out) diff --git a/plugins/train/model/dfl_sae.py b/plugins/train/model/dfl_sae.py index 0c54e0031d..6c8f5c61a5 100644 --- a/plugins/train/model/dfl_sae.py +++ b/plugins/train/model/dfl_sae.py @@ -62,7 +62,7 @@ def build_model(self, inputs): if self.architecture == "liae": inter_both = self.inter_liae("both", enc_output_shape) - int_output_shape = (np.array(inter_both.output_shape[1:]) * (1, 1, 2)).tolist() + int_output_shape = (np.asarray(inter_both.output_shape[1:]) * (1, 1, 2)).tolist() inter_a = Concatenate()([inter_both(encoder_a), inter_both(encoder_a)]) inter_b = Concatenate()([self.inter_liae("b", enc_output_shape)(encoder_b), diff --git a/plugins/train/trainer/_base.py b/plugins/train/trainer/_base.py index dbbc74c9fd..baeea1465e 100644 --- a/plugins/train/trainer/_base.py +++ b/plugins/train/trainer/_base.py @@ -418,7 +418,7 @@ def __init__(self, self.images: dict[T.Literal["a", "b"], list[np.ndarray]] = {} self._coverage_ratio = coverage_ratio self._mask_opacity = mask_opacity / 100.0 - self._mask_color = np.array(hex_to_rgb(mask_color))[..., 2::-1] / 255. + self._mask_color = np.asarray(hex_to_rgb(mask_color))[..., 2::-1] / 255. 
logger.debug("Initialized %s", self.__class__.__name__) def toggle_mask_display(self) -> None: @@ -479,7 +479,7 @@ def _resize_sample(cls, logger.debug("Resizing sample: (side: '%s', sample.shape: %s, target_size: %s, scale: %s)", side, sample.shape, target_size, scale) interpn = cv2.INTER_CUBIC if scale > 1.0 else cv2.INTER_AREA - retval = np.array([cv2.resize(img, (target_size, target_size), interpolation=interpn) + retval = np.asarray([cv2.resize(img, (target_size, target_size), interpolation=interpn) for img in sample]) logger.debug("Resized sample: (side: '%s' shape: %s)", side, retval.shape) return retval @@ -916,7 +916,7 @@ def get_transpose_axes(num): x_axes = list(range(1, num - 1, 2)) return y_axes, x_axes, [num - 1] - images_shape = np.array(images.shape) + images_shape = np.asarray(images.shape) new_axes = get_transpose_axes(len(images_shape)) new_shape = [np.prod(images_shape[x]) for x in new_axes] logger.debug("Stacked images") diff --git a/scripts/convert.py b/scripts/convert.py index 7ae3f0eca6..2f02a9e26d 100644 --- a/scripts/convert.py +++ b/scripts/convert.py @@ -59,7 +59,7 @@ class ConvertItem: inbound: ExtractMedia feed_faces: list[AlignedFace] = field(default_factory=list) reference_faces: list[AlignedFace] = field(default_factory=list) - swapped_faces: np.ndarray = np.array([]) + swapped_faces: np.ndarray = np.asarray([]) class Convert(): @@ -976,7 +976,7 @@ def _process_batch(self, batch: list[ConvertItem], faces_seen: int): batch_size = None predicted = self._predict(feed_faces, batch_size) else: - predicted = np.array([]) + predicted = np.asarray([]) self._queue_out_frames(batch, predicted) diff --git a/scripts/extract.py b/scripts/extract.py index da9edf1d15..6a9d922630 100644 --- a/scripts/extract.py +++ b/scripts/extract.py @@ -208,8 +208,8 @@ def __init__(self, logger.debug("Filter not selected. Exiting %s", self.__class__.__name__) return - self._embeddings: list[np.ndarray] = [np.array([]) for _ in self._filter_files] - self._nembeddings: list[np.ndarray] = [np.array([]) for _ in self._nfilter_files] + self._embeddings: list[np.ndarray] = [np.asarray([]) for _ in self._filter_files] + self._nembeddings: list[np.ndarray] = [np.asarray([]) for _ in self._nfilter_files] self._extractor = extractor self._get_embeddings() @@ -230,7 +230,7 @@ def embeddings(self) -> np.ndarray: if self._embeddings and all(np.any(e) for e in self._embeddings): retval = np.concatenate(self._embeddings, axis=0) else: - retval = np.array([]) + retval = np.asarray([]) return retval @property @@ -239,7 +239,7 @@ def n_embeddings(self) -> np.ndarray: if self._nembeddings and all(np.any(e) for e in self._nembeddings): retval = np.concatenate(self._nembeddings, axis=0) else: - retval = np.array([]) + retval = np.asarray([]) return retval @classmethod @@ -341,19 +341,19 @@ def _identity_from_extracted(cls, filename) -> tuple[np.ndarray, bool]: """ if os.path.splitext(filename)[-1].lower() != ".png": logger.debug("'%s' not a png. Returning empty array", filename) - return np.array([]), False + return np.asarray([]), False meta = read_image_meta(filename) if "itxt" not in meta or "alignments" not in meta["itxt"]: logger.debug("'%s' does not contain faceswap data. Returning empty array", filename) - return np.array([]), False + return np.asarray([]), False align: "PNGHeaderAlignmentsDict" = meta["itxt"]["alignments"] if "identity" not in align or "vggface2" not in align["identity"]: logger.debug("'%s' does not contain identity data. 
Returning empty array", filename) - return np.array([]), True + return np.asarray([]), True - retval = np.array(align["identity"]["vggface2"]) + retval = np.asarray(align["identity"]["vggface2"]) logger.debug("Obtained identity for '%s'. Shape: %s", filename, retval.shape) return retval, True @@ -376,7 +376,7 @@ def _process_extracted(self, item: ExtractMedia) -> None: lbl = "filter" if is_filter else "nfilter" filelist = self._filter_files if is_filter else self._nfilter_files embeddings = self._embeddings if is_filter else self._nembeddings - identities = np.array([face.identity["vggface2"] for face in item.detected_faces]) + identities = np.asarray([face.identity["vggface2"] for face in item.detected_faces]) idx = filelist.index(item.filename) if len(item.detected_faces) == 0: diff --git a/scripts/fsmedia.py b/scripts/fsmedia.py index 9d1fbdbb4e..7da46f0544 100644 --- a/scripts/fsmedia.py +++ b/scripts/fsmedia.py @@ -530,7 +530,7 @@ def _annotate_face_box(self, face: AlignedFace) -> None: cv2.rectangle(face.face, tuple(roi[:2]), tuple(roi[2:]), color, 1) # Size in top right corner - roi_pnts = np.array([[roi[0], roi[1]], + roi_pnts = np.asarray([[roi[0], roi[1]], [roi[0], roi[3]], [roi[2], roi[3]], [roi[2], roi[1]]]) diff --git a/tests/lib/gui/stats/event_reader_test.py b/tests/lib/gui/stats/event_reader_test.py index 216790550b..9bb5406650 100644 --- a/tests/lib/gui/stats/event_reader_test.py +++ b/tests/lib/gui/stats/event_reader_test.py @@ -67,8 +67,8 @@ def test__logfiles(tmp_path: str): def test__cachedata(): """ Test the _CacheData class operates correctly """ labels = ["label_a", "label_b"] - timestamps = np.array([1.23, 4.56], dtype="float64") - loss = np.array([[2.34, 5.67], [3.45, 6.78]], dtype="float32") + timestamps = np.asarray([1.23, 4.56], dtype="float64") + loss = np.asarray([[2.34, 5.67], [3.45, 6.78]], dtype="float32") # Initial test cache = _CacheData(labels, timestamps, loss) @@ -79,8 +79,8 @@ def test__cachedata(): np.testing.assert_array_equal(cache.loss, loss) # Add data test - new_timestamps = np.array([2.34, 6.78], dtype="float64") - new_loss = np.array([[3.45, 7.89], [8.90, 1.23]], dtype="float32") + new_timestamps = np.asarray([2.34, 6.78], dtype="float64") + new_loss = np.asarray([[3.45, 7.89], [8.90, 1.23]], dtype="float32") expected_timestamps = np.concatenate([timestamps, new_timestamps]) expected_loss = np.concatenate([loss, new_loss]) @@ -113,8 +113,8 @@ def test_is_cached() -> None: cache = _Cache() data = _CacheData(["test_1", "test_2"], - np.array([1.23, ], dtype="float64"), - np.array([[2.34, ], [4.56]], dtype="float32")) + np.asarray([1.23, ], dtype="float64"), + np.asarray([[2.34, ], [4.56]], dtype="float32")) cache._data[1] = data assert cache.is_cached(1) assert not cache.is_cached(2) @@ -138,8 +138,8 @@ def test_cache_data(mocker: pytest_mock.MockerFixture) -> None: cache.cache_data(session_id, data, labels, is_live) assert cache._loss_labels == labels assert cache.is_cached(session_id) - np.testing.assert_array_equal(cache._data[session_id].timestamps, np.array([4., 5.])) - np.testing.assert_array_equal(cache._data[session_id].loss, np.array([[1., 2.], [3., 4.]])) + np.testing.assert_array_equal(cache._data[session_id].timestamps, np.asarray([4., 5.])) + np.testing.assert_array_equal(cache._data[session_id].loss, np.asarray([[1., 2.], [3., 4.]])) add_live = mocker.patch("lib.gui.analysis.event_reader._Cache._add_latest_live") is_live = True @@ -156,22 +156,22 @@ def test__to_numpy() -> None: # Non-live is_live = False times, loss = 
cache._to_numpy(data, is_live) - np.testing.assert_array_equal(times, np.array([4., 5.])) - np.testing.assert_array_equal(loss, np.array([[1., 2.], [3., 4.]])) + np.testing.assert_array_equal(times, np.asarray([4., 5.])) + np.testing.assert_array_equal(loss, np.asarray([[1., 2.], [3., 4.]])) # Correctly collected live is_live = True times, loss = cache._to_numpy(data, is_live) - np.testing.assert_array_equal(times, np.array([4., 5.])) - np.testing.assert_array_equal(loss, np.array([[1., 2.], [3., 4.]])) + np.testing.assert_array_equal(times, np.asarray([4., 5.])) + np.testing.assert_array_equal(loss, np.asarray([[1., 2.], [3., 4.]])) # Incorrectly collected live live_data = {1: EventData(4., [1., 2.]), 2: EventData(5., [3.]), 3: EventData(6., [4., 5., 6.])} times, loss = cache._to_numpy(live_data, is_live) - np.testing.assert_array_equal(times, np.array([4.])) - np.testing.assert_array_equal(loss, np.array([[1., 2.]])) + np.testing.assert_array_equal(times, np.asarray([4.])) + np.testing.assert_array_equal(loss, np.asarray([[1., 2.]])) @staticmethod def test__collect_carry_over() -> None: @@ -195,8 +195,8 @@ def test__process_data() -> None: 2: EventData(5., [7., 8.]), 3: EventData(6., [9.])} is_live = False - expected_timestamps = np.array([4., 5.]) - expected_loss = np.array([[5., 6.], [7., 8.]]) + expected_timestamps = np.asarray([4., 5.]) + expected_loss = np.asarray([[5., 6.], [7., 8.]]) expected_carry_over = {3: EventData(6., [9.])} timestamps, loss = cache._process_data(data, is_live) @@ -216,10 +216,10 @@ def test__add_latest_live() -> None: session_id = 1 labels = ['label1', 'label2'] data = {1: EventData(3., [5., 6.]), 2: EventData(4., [7., 8.])} - new_timestamp = np.array([5.], dtype="float64") - new_loss = np.array([[8., 9.]], dtype="float32") - expected_timestamps = np.array([3., 4., 5.]) - expected_loss = np.array([[5., 6.], [7., 8.], [8., 9.]]) + new_timestamp = np.asarray([5.], dtype="float64") + new_loss = np.asarray([[8., 9.]], dtype="float32") + expected_timestamps = np.asarray([3., 4., 5.]) + expected_loss = np.asarray([[5., 6.], [7., 8.], [8., 9.]]) cache = _Cache() cache.cache_data(session_id, data, labels) # Initial data @@ -241,8 +241,8 @@ def test_get_data() -> None: labels = ['label1', 'label2'] data = {1: EventData(3., [5., 6.]), 2: EventData(4., [7., 8.])} - expected_timestamps = np.array([3., 4.]) - expected_loss = np.array([[5., 6.], [7., 8.]]) + expected_timestamps = np.asarray([3., 4.]) + expected_loss = np.asarray([[5., 6.], [7., 8.]]) cache.cache_data(session_id, data, labels, is_live=False) get_timestamps = cache.get_data(session_id, "timestamps") @@ -708,7 +708,7 @@ def test__get_outputs(self, event_parser_instance: _EventParser) -> None: outputs = [["decoder_a", 1, 0], ["decoder_b", 1, 0]] model_config = {"output_layers": outputs} - expected = np.array([[out] for out in outputs]) + expected = np.asarray([[out] for out in outputs]) actual = event_parser_instance._get_outputs(model_config) assert isinstance(actual, np.ndarray) assert actual.shape == (2, 1, 3) diff --git a/tests/tools/alignments/media_test.py b/tests/tools/alignments/media_test.py index 17e45a517e..792fb39025 100644 --- a/tests/tools/alignments/media_test.py +++ b/tests/tools/alignments/media_test.py @@ -819,9 +819,9 @@ def test_get_faces_in_frame(self, faces.get_faces_in_frame(frame, update=True, image=img) faces.get_faces.assert_called_once_with(frame, image=img) - _params = [(np.array(([[25, 47], [32, 232], [244, 237], [240, 21]])), 216), - (np.array(([[127, 392], [403, 510], [32, 237], 
[19, 210]])), 211), - (np.array(([[26, 1927], [112, 1234], [1683, 1433], [78, 1155]])), 773)] + _params = [(np.asarray(([[25, 47], [32, 232], [244, 237], [240, 21]])), 216), + (np.asarray(([[127, 392], [403, 510], [32, 237], [19, 210]])), 211), + (np.asarray(([[26, 1927], [112, 1234], [1683, 1433], [78, 1155]])), 773)] @pytest.mark.parametrize("roi,expected", _params) def test_get_roi_size_for_frame(self, diff --git a/tests/utils.py b/tests/utils.py index b357dc13c4..3916d1aef7 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -63,7 +63,7 @@ def to_categorical(var_y, num_classes=None, dtype='float32'): >>> [ 0., 0., 1.], >>> [ 1., 0., 0.]], dtype=float32) """ - var_y = np.array(var_y, dtype='int') + var_y = np.asarray(var_y, dtype='int') input_shape = var_y.shape if input_shape and input_shape[-1] == 1 and len(input_shape) > 1: input_shape = tuple(input_shape[:-1]) diff --git a/tools/alignments/jobs.py b/tools/alignments/jobs.py index 578ade1ea3..3da57d1548 100644 --- a/tools/alignments/jobs.py +++ b/tools/alignments/jobs.py @@ -627,7 +627,7 @@ def _normalize(self) -> None: continue # We should only be normalizing a single face, so just take # the first landmarks found - landmarks = np.array(val[0]["landmarks_xy"]).reshape((lm_count, 2, 1)) + landmarks = np.asarray(val[0]["landmarks_xy"]).reshape((lm_count, 2, 1)) start = end end = start + landmarks.shape[2] # Store in one big array diff --git a/tools/alignments/jobs_faces.py b/tools/alignments/jobs_faces.py index ac2205f89c..a22b74cfe9 100644 --- a/tools/alignments/jobs_faces.py +++ b/tools/alignments/jobs_faces.py @@ -108,7 +108,7 @@ def _extract_alignment(self, metadata: dict) -> tuple[str, int, AlignmentFileDic alignments file in positin 2 """ alignment = metadata["alignments"] - alignment["landmarks_xy"] = np.array(alignment["landmarks_xy"], dtype="float32") + alignment["landmarks_xy"] = np.asarray(alignment["landmarks_xy"], dtype="float32") src = metadata["source"] frame_name = src["source_filename"] diff --git a/tools/alignments/jobs_frames.py b/tools/alignments/jobs_frames.py index 3c25b48121..a5be7d9e06 100644 --- a/tools/alignments/jobs_frames.py +++ b/tools/alignments/jobs_frames.py @@ -154,7 +154,7 @@ def _annotate_pose(cls, image: np.ndarray, face: DetectedFace) -> None: face: :class:`lib.align.DetectedFace` The aligned face loaded for head centering """ - center = np.array((face.aligned.size / 2, + center = np.asarray((face.aligned.size / 2, face.aligned.size / 2)).astype("int32").reshape(1, 2) center = np.rint(face.aligned.transform_points(center, invert=True)).astype("int32") points = face.aligned.pose.xyz_2d * face.aligned.size @@ -456,11 +456,11 @@ def _pad_legacy_masks(cls, detected_face: DetectedFace) -> None: new_size = int(size + (size * EXTRACT_RATIOS["face"]) / 2) shift = np.rint(offset * (size - (size * EXTRACT_RATIOS["face"]))).astype("int32") - pos = np.array([(new_size // 2 - size // 2) - shift[1], + pos = np.asarray([(new_size // 2 - size // 2) - shift[1], (new_size // 2) + (size // 2) - shift[1], (new_size // 2 - size // 2) - shift[0], (new_size // 2) + (size // 2) - shift[0]]) - bounds = np.array([max(0, pos[0]), min(new_size, pos[1]), + bounds = np.asarray([max(0, pos[0]), min(new_size, pos[1]), max(0, pos[2]), min(new_size, pos[3])]) slice_in = [slice(0 - (pos[0] - bounds[0]), size - (pos[1] - bounds[1])), diff --git a/tools/manual/detected_faces.py b/tools/manual/detected_faces.py index 7dcd90fc83..e230e515a5 100644 --- a/tools/manual/detected_faces.py +++ b/tools/manual/detected_faces.py @@ -307,7 
+307,7 @@ def save(self) -> None: len(frames)) for idx, faces in zip(frames, - np.array(self._frame_faces, dtype="object")[np.array(frames)]): + np.asarray(self._frame_faces, dtype="object")[np.asarray(frames)]): frame = self._sorted_frame_names[idx] self._alignments.data[frame]["faces"] = [face.to_alignment() for face in faces] diff --git a/tools/manual/faceviewer/frame.py b/tools/manual/faceviewer/frame.py index 5c6c8f024b..1173cd0129 100644 --- a/tools/manual/faceviewer/frame.py +++ b/tools/manual/faceviewer/frame.py @@ -470,7 +470,7 @@ def get_muted_color(self, color_key: str) -> str: The hex color code of the muted color """ scale = 0.65 - hls = np.array(colorsys.rgb_to_hls(*hex_to_rgb(self.control_colors[color_key]))) + hls = np.asarray(colorsys.rgb_to_hls(*hex_to_rgb(self.control_colors[color_key]))) scale = (1 - scale) + 1 if hls[1] < 120 else scale hls[1] = max(0., min(256., scale * hls[1])) rgb = np.clip(np.rint(colorsys.hls_to_rgb(*hls)).astype("uint8"), 0, 255) @@ -669,7 +669,7 @@ def _get_grid(self) -> None: num=labels.shape[1], endpoint=False, dtype="int") - self._grid = np.array((*labels, *np.meshgrid(x_coords, y_coords)), dtype="int") + self._grid = np.asarray((*labels, *np.meshgrid(x_coords, y_coords)), dtype="int") logger.debug(self._grid.shape) def _get_labels(self) -> np.ndarray | None: @@ -692,7 +692,7 @@ def _get_labels(self) -> np.ndarray | None: rows = ceil(face_count / columns) remainder = face_count % columns padding = [] if remainder == 0 else [-1 for _ in range(columns - remainder)] - labels = np.array((self._raw_indices["frame"] + padding, + labels = np.asarray((self._raw_indices["frame"] + padding, self._raw_indices["face"] + padding), dtype="int").reshape((2, rows, columns)) logger.debug("face-count: %s, columns: %s, rows: %s, remainder: %s, padding: %s, labels " @@ -714,7 +714,7 @@ def _get_display_faces(self): columns, rows = self.columns_rows face_count = len(self._raw_indices["frame"]) padding = [None for _ in range(face_count, columns * rows)] - self._display_faces = np.array([None if idx is None else current_faces[idx][face_idx] + self._display_faces = np.asarray([None if idx is None else current_faces[idx][face_idx] for idx, face_idx in zip(self._raw_indices["frame"] + padding, self._raw_indices["face"] + padding)], diff --git a/tools/manual/faceviewer/interact.py b/tools/manual/faceviewer/interact.py index 124629320c..1c75d66771 100644 --- a/tools/manual/faceviewer/interact.py +++ b/tools/manual/faceviewer/interact.py @@ -68,10 +68,10 @@ def on_hover(self, event: tk.Event | None) -> None: mouse event) then the location of the cursor will be calculated """ if event is None: - pnts = np.array((self._canvas.winfo_pointerx(), self._canvas.winfo_pointery())) - pnts -= np.array((self._canvas.winfo_rootx(), self._canvas.winfo_rooty())) + pnts = np.asarray((self._canvas.winfo_pointerx(), self._canvas.winfo_pointery())) + pnts -= np.asarray((self._canvas.winfo_rootx(), self._canvas.winfo_rooty())) else: - pnts = np.array((event.x, event.y)) + pnts = np.asarray((event.x, event.y)) coords = (int(self._canvas.canvasx(pnts[0])), int(self._canvas.canvasy(pnts[1]))) face = self._viewport.face_from_point(*coords) diff --git a/tools/manual/faceviewer/viewport.py b/tools/manual/faceviewer/viewport.py index 94486a82f8..9f30065f3f 100644 --- a/tools/manual/faceviewer/viewport.py +++ b/tools/manual/faceviewer/viewport.py @@ -366,12 +366,12 @@ def face_from_point(self, point_x: int, point_y: int) -> np.ndarray: -1 """ if not self._grid.is_valid or point_x > 
self._grid.dimensions[0]: - retval = np.array((-1, -1, -1, -1)) + retval = np.asarray((-1, -1, -1, -1)) else: x_idx = np.searchsorted(self._objects.visible_grid[2, 0, :], point_x, side="left") - 1 y_idx = np.searchsorted(self._objects.visible_grid[3, :, 0], point_y, side="left") - 1 if x_idx < 0 or y_idx < 0: - retval = np.array((-1, -1, -1, -1)) + retval = np.asarray((-1, -1, -1, -1)) else: retval = self._objects.visible_grid[:, y_idx, x_idx] logger.trace(retval) # type:ignore[attr-defined] @@ -558,7 +558,7 @@ def _top_left(self) -> np.ndarray: retval = [0.0, 0.0] else: retval = self._canvas.coords(self._images[0][0]) - return np.array(retval, dtype="int") + return np.asarray(retval, dtype="int") def update(self) -> None: """ Load and unload thumbnails in the visible area of the faces viewer. """ @@ -650,8 +650,8 @@ def _add_rows(self, existing_rows: int, required_rows: int) -> None: meshes.append([{} if face is None else self._recycler.get_mesh(face) for face in self._visible_faces[row]]) - a_images = np.array(images) - a_meshes = np.array(meshes) + a_images = np.asarray(images) + a_meshes = np.asarray(meshes) if not np.any(self._images): logger.debug("Adding initial viewport objects: (image shapes: %s, mesh shapes: %s)", diff --git a/tools/manual/frameviewer/editor/_base.py b/tools/manual/frameviewer/editor/_base.py index d295c0d847..3987cb9889 100644 --- a/tools/manual/frameviewer/editor/_base.py +++ b/tools/manual/frameviewer/editor/_base.py @@ -93,7 +93,7 @@ def _zoomed_roi(self): top = 0 right = self._globals.frame_display_dims[0] / 2 + half_size bottom = self._globals.frame_display_dims[1] - retval = np.rint(np.array((left, top, right, bottom))).astype("int32") + retval = np.rint(np.asarray((left, top, right, bottom))).astype("int32") logger.trace("Zoomed ROI: %s", retval) return retval diff --git a/tools/manual/frameviewer/editor/bounding_box.py b/tools/manual/frameviewer/editor/bounding_box.py index d546feb172..a8be8e60cc 100644 --- a/tools/manual/frameviewer/editor/bounding_box.py +++ b/tools/manual/frameviewer/editor/bounding_box.py @@ -103,7 +103,7 @@ def update_annotation(self): key = "bb_box" color = self._control_color for idx, face in enumerate(self._face_iterator): - box = np.array([(face.left, face.top), (face.right, face.bottom)]) + box = np.asarray([(face.left, face.top), (face.right, face.bottom)]) box = self._scale_to_display(box).astype("int32").flatten() kwargs = {"outline": color, "width": 1} logger.trace("frame_index: %s, face_index: %s, box: %s, kwargs: %s", @@ -363,7 +363,7 @@ def _move(self, event): shift = (event.x - self._drag_data["current_location"][0], event.y - self._drag_data["current_location"][1]) face_tag = f"bb_box_face_{face_idx}" - coords = np.array(self._canvas.coords(face_tag)) + (*shift, *shift) + coords = np.asarray(self._canvas.coords(face_tag)) + (*shift, *shift) logger.trace("face_tag: %s, shift: %s, new co-ords: %s", face_tag, shift, coords) self._det_faces.update.bounding_box(self._globals.frame_index, face_idx, @@ -382,7 +382,7 @@ def _coords_to_bounding_box(self, coords): """ logger.trace("in: %s", coords) coords = self.scale_from_display( - np.array(coords).reshape((2, 2))).flatten().astype("int32") + np.asarray(coords).reshape((2, 2))).flatten().astype("int32") logger.trace("out: %s", coords) return (coords[0], coords[2] - coords[0], coords[1], coords[3] - coords[1]) diff --git a/tools/manual/frameviewer/editor/extract_box.py b/tools/manual/frameviewer/editor/extract_box.py index ffe8bf4734..3429f9843b 100644 --- 
a/tools/manual/frameviewer/editor/extract_box.py +++ b/tools/manual/frameviewer/editor/extract_box.py @@ -56,7 +56,7 @@ def update_annotation(self): for idx, face in enumerate(self._face_iterator): logger.trace("Drawing Extract Box: (idx: %s)", idx) if self._globals.is_zoomed: - box = np.array((roi[0], roi[1], roi[2], roi[1], roi[2], roi[3], roi[0], roi[3])) + box = np.asarray((roi[0], roi[1], roi[2], roi[1], roi[2], roi[3], roi[0], roi[3])) else: aligned = AlignedFace(face.landmarks_xy, centering="face") box = self._scale_to_display(aligned.original_roi).flatten() @@ -193,10 +193,10 @@ def _check_cursor_rotate(self, event): ``True`` if cursor is over a rotate point otherwise ``False`` """ distance = 30 - boxes = np.array([np.array(self._canvas.coords(item_id)).reshape(4, 2) + boxes = np.asarray([np.asarray(self._canvas.coords(item_id)).reshape(4, 2) for item_id in self._canvas.find_withtag("eb_box") if self._canvas.itemcget(item_id, "state") != "hidden"]) - position = np.array((event.x, event.y)).astype("float32") + position = np.asarray((event.x, event.y)).astype("float32") for face_idx, points in enumerate(boxes): if any(np.all(position > point - distance) and np.all(position < point + distance) for point in points): @@ -226,7 +226,7 @@ def _drag_start(self, event): self._drag_data = {} self._drag_callback = None return - self._drag_data["current_location"] = np.array((event.x, event.y)) + self._drag_data["current_location"] = np.asarray((event.x, event.y)) callback = {"anchor": self._resize, "rotate": self._rotate, "box": self._move} self._drag_callback = callback[self._mouse_location[0]] @@ -256,7 +256,7 @@ def _move(self, event): return shift_x = event.x - self._drag_data["current_location"][0] shift_y = event.y - self._drag_data["current_location"][1] - scaled_shift = self.scale_from_display(np.array((shift_x, shift_y)), do_offset=False) + scaled_shift = self.scale_from_display(np.asarray((shift_x, shift_y)), do_offset=False) self._det_faces.update.landmarks(self._globals.frame_index, self._mouse_location[1], *scaled_shift) @@ -272,9 +272,9 @@ def _resize(self, event): """ face_idx = self._mouse_location[1] face_tag = f"eb_box_face_{face_idx}" - position = np.array((event.x, event.y)) - box = np.array(self._canvas.coords(face_tag)) - center = np.array((sum(box[0::2]) / 4, sum(box[1::2]) / 4)) + position = np.asarray((event.x, event.y)) + box = np.asarray(self._canvas.coords(face_tag)) + center = np.asarray((sum(box[0::2]) / 4, sum(box[1::2]) / 4)) if not self._check_in_bounds(center, box, position): logger.trace("Drag out of bounds. 
Not updating") self._drag_data["current_location"] = position @@ -319,11 +319,11 @@ def _check_in_bounds(self, center, box, position): ``True`` if the drag operation does not cross the center point otherwise ``False`` """ # Generate lines that span the full frame (x and y) along the center point - center_x = np.array(((center[0], 0), (center[0], self._globals.frame_display_dims[1]))) - center_y = np.array(((0, center[1]), (self._globals.frame_display_dims[0], center[1]))) + center_x = np.asarray(((center[0], 0), (center[0], self._globals.frame_display_dims[1]))) + center_y = np.asarray(((0, center[1]), (self._globals.frame_display_dims[0], center[1]))) # Generate a line coming from the current corner location to the current cursor position - full_line = np.array((box[self._mouse_location[2] * 2:self._mouse_location[2] * 2 + 2], + full_line = np.asarray((box[self._mouse_location[2] * 2:self._mouse_location[2] * 2 + 2], position)) logger.trace("center: %s, center_x_line: %s, center_y_line: %s, full_line: %s", center, center_x, center_y, full_line) @@ -367,10 +367,10 @@ def _rotate(self, event): """ face_idx = self._mouse_location[1] face_tag = f"eb_box_face_{face_idx}" - box = np.array(self._canvas.coords(face_tag)) - position = np.array((event.x, event.y)) + box = np.asarray(self._canvas.coords(face_tag)) + position = np.asarray((event.x, event.y)) - center = np.array((sum(box[0::2]) / 4, sum(box[1::2]) / 4)) + center = np.asarray((sum(box[0::2]) / 4, sum(box[1::2]) / 4)) init_to_center = self._drag_data["current_location"] - center new_to_center = position - center angle = np.rad2deg(np.arctan2(*new_to_center) - np.arctan2(*init_to_center)) diff --git a/tools/manual/frameviewer/editor/landmarks.py b/tools/manual/frameviewer/editor/landmarks.py index e59517e7b0..365dc16a2c 100644 --- a/tools/manual/frameviewer/editor/landmarks.py +++ b/tools/manual/frameviewer/editor/landmarks.py @@ -128,7 +128,7 @@ def _label_landmark(self, bounding_box, face_index, landmark_index): """ if not self._is_active: return - top_left = np.array(bounding_box[:2]) - 20 + top_left = np.asarray(bounding_box[:2]) - 20 # NB The text must be visible to be able to get the bounding box, so set to hidden # after the bounding box has been retrieved @@ -341,7 +341,7 @@ def _snap_selection_to_points(self): particularly true in zoomed mode. The selection box is therefore redrawn to bind just outside of the selected points. 
""" - all_coords = np.array([self._canvas.coords(item_id) + all_coords = np.asarray([self._canvas.coords(item_id) for item_id in self._canvas.find_withtag("lm_selected")]) mins = np.min(all_coords, axis=0) maxes = np.max(all_coords, axis=0) @@ -365,9 +365,9 @@ def _move_point(self, event): shift_y = event.y - self._drag_data["start_location"][1] if self._globals.is_zoomed: - scaled_shift = np.array((shift_x, shift_y)) + scaled_shift = np.asarray((shift_x, shift_y)) else: - scaled_shift = self.scale_from_display(np.array((shift_x, shift_y)), do_offset=False) + scaled_shift = self.scale_from_display(np.asarray((shift_x, shift_y)), do_offset=False) self._det_faces.update.landmark(self._globals.frame_index, face_idx, lm_idx, @@ -401,9 +401,9 @@ def _move_selection(self, event): shift_x = event.x - self._drag_data["start_location"][0] shift_y = event.y - self._drag_data["start_location"][1] if self._globals.is_zoomed: - scaled_shift = np.array((shift_x, shift_y)) + scaled_shift = np.asarray((shift_x, shift_y)) else: - scaled_shift = self.scale_from_display(np.array((shift_x, shift_y)), do_offset=False) + scaled_shift = self.scale_from_display(np.asarray((shift_x, shift_y)), do_offset=False) self._canvas.move(self._selection_box, shift_x, shift_y) self._det_faces.update.landmark(self._globals.frame_index, diff --git a/tools/manual/frameviewer/editor/mask.py b/tools/manual/frameviewer/editor/mask.py index fec2c92d13..c517fd6b30 100644 --- a/tools/manual/frameviewer/editor/mask.py +++ b/tools/manual/frameviewer/editor/mask.py @@ -163,7 +163,7 @@ def update_annotation(self): key = self.__class__.__name__ mask_type = self._control_vars["display"]["MaskType"].get().lower() color = self._control_color[1:] - rgb_color = np.array(tuple(int(color[i:i + 2], 16) for i in (0, 2, 4))) + rgb_color = np.asarray(tuple(int(color[i:i + 2], 16) for i in (0, 2, 4))) roi_color = self._annotation_formats["ExtractBox"]["color"].get() opacity = self._opacity for idx, face in enumerate(self._face_iterator): @@ -247,12 +247,12 @@ def _set_full_frame_meta(self, mask, mask_scale): slice(int(round(min_max["min"][0])), int(round(min_max["max"][0])))) # Adjust affine matrix for internal mask size and display dimensions - adjustments = (np.array([[mask_scale, 0., 0.], [0., mask_scale, 0.]]), - np.array([[1 / self._globals.current_frame.scale, 0., 0.], + adjustments = (np.asarray([[mask_scale, 0., 0.], [0., mask_scale, 0.]]), + np.asarray([[1 / self._globals.current_frame.scale, 0., 0.], [0., 1 / self._globals.current_frame.scale, 0.], [0., 0., 1.]])) in_matrix = np.dot(adjustments[0], - np.concatenate((mask.affine_matrix, np.array([[0., 0., 1.]])))) + np.concatenate((mask.affine_matrix, np.asarray([[0., 0., 1.]])))) affine_matrix = np.dot(in_matrix, adjustments[1]) # Get the size of the mask roi box in the frame @@ -375,7 +375,7 @@ def _update_roi_box(self, mask, face_index, color): """ if self._globals.is_zoomed: roi = self._zoomed_roi - box = np.array((roi[0], roi[1], roi[2], roi[1], roi[2], roi[3], roi[0], roi[3])) + box = np.asarray((roi[0], roi[1], roi[2], roi[1], roi[2], roi[3], roi[0], roi[3])) else: box = self._scale_to_display(mask.original_roi).flatten() top_left = box[:2] - 10 @@ -455,14 +455,14 @@ def _drag_start(self, event, control_click=False): # pylint:disable=arguments-d self._drag_data = {} self._drag_callback = None else: - self._drag_data["starting_location"] = np.array((event.x, event.y)) + self._drag_data["starting_location"] = np.asarray((event.x, event.y)) self._drag_data["control_click"] = 
control_click - self._drag_data["color"] = np.array(tuple(int(self._control_color[1:][i:i + 2], 16) + self._drag_data["color"] = np.asarray(tuple(int(self._control_color[1:][i:i + 2], 16) for i in (0, 2, 4))) self._drag_data["opacity"] = self._opacity self._get_cursor_shape_mark( self._meta["mask"][face_idx], - np.array(((event.x, event.y), )), + np.asarray(((event.x, event.y), )), face_idx) self._drag_callback = self._paint @@ -475,7 +475,7 @@ def _paint(self, event): The tkinter mouse event. """ face_idx = self._mouse_location[1] - line = np.array((self._drag_data["starting_location"], (event.x, event.y))) + line = np.asarray((self._drag_data["starting_location"], (event.x, event.y))) line, scale = self._transform_points(face_idx, line) brush_radius = int(round(self._brush_radius * scale)) color = 0 if self._edit_mode == "erase" else 255 @@ -490,7 +490,7 @@ def _paint(self, event): face_idx, self._drag_data["color"], self._drag_data["opacity"]) - self._drag_data["starting_location"] = np.array((event.x, event.y)) + self._drag_data["starting_location"] = np.asarray((event.x, event.y)) self._update_cursor(event) def _transform_points(self, face_index, points): @@ -530,8 +530,8 @@ def _drag_stop(self, event): if not self._drag_data: return face_idx = self._mouse_location[1] - location = np.array(((event.x, event.y), )) + location = np.asarray(((event.x, event.y), )) if np.array_equal(self._drag_data["starting_location"], location[0]): self._get_cursor_shape_mark(self._meta["mask"][face_idx], location, face_idx) self._mask_to_alignments(face_idx) self._drag_data = {} diff --git a/tools/preview/viewer.py b/tools/preview/viewer.py index 7abe11b96d..46ad75f220 100644 --- a/tools/preview/viewer.py +++ b/tools/preview/viewer.py @@ -66,8 +66,8 @@ def __init__(self, app: Preview, size: int, padding: int) -> None: self._faces = _Faces() self._centering: CenteringType | None = None - self._faces_source: np.ndarray = np.array([]) - self._faces_dest: np.ndarray = np.array([]) + self._faces_source: np.ndarray = np.asarray([]) + self._faces_dest: np.ndarray = np.asarray([]) self._tk_image: ImageTk.PhotoImage | None = None # Set from Samples diff --git a/tools/sort/sort_methods.py b/tools/sort/sort_methods.py index f2a4b29526..67ab8ab281 100644 --- a/tools/sort/sort_methods.py +++ b/tools/sort/sort_methods.py @@ -358,7 +358,7 @@ def _binning_linear_threshold(self, units: str = "", multiplier: int = 1) -> lis list List of bins of filenames """ - sizes = np.array([i[1] for i in self._result]) + sizes = np.asarray([i[1] for i in self._result]) thresholds = np.linspace(sizes.min(), sizes.max(), self._num_bins + 1) labels = self._get_unique_labels(thresholds * multiplier) @@ -451,7 +451,7 @@ def _mask_face(cls, image: np.ndarray, alignments: PNGHeaderAlignmentsDict) -> n """ det_face = DetectedFace() det_face.from_png_meta(alignments) - aln_face = AlignedFace(np.array(alignments["landmarks_xy"], dtype="float32"), + aln_face = AlignedFace(np.asarray(alignments["landmarks_xy"], dtype="float32"), image=image, centering="legacy", size=256, @@ -729,9 +729,9 @@ def _convert_color(self, image: np.ndarray) -> np.ndarray: The color converted image """ if self._method == 'gray': - conversion = np.array([[0.0722], [0.7152], [0.2126]]) + conversion = np.asarray([[0.0722], [0.7152], [0.2126]]) else: - conversion = np.array([[0.25, 0.5, 0.25], [-0.5, 0.0, 0.5], [-0.25, 0.5, -0.25]]) + conversion = np.asarray([[0.25, 0.5, 0.25], [-0.5, 0.0, 0.5], 
[-0.25, 0.5, -0.25]]) operation = 'ijk, kl -> ijl' if self._method == "gray" else 'ijl, kl -> ijk' path = np.einsum_path(operation, image[..., :3], conversion, optimize='optimal')[0] @@ -888,7 +888,7 @@ def score_image(self, self._log_once = False if alignments.get("identity", {}).get("vggface2"): - embedding = np.array(alignments["identity"]["vggface2"], dtype="float32") + embedding = np.asarray(alignments["identity"]["vggface2"], dtype="float32") if not self._logged_lm_count_once and len(alignments["landmarks_xy"]) == 4: logger.warning(self._warning) @@ -902,7 +902,7 @@ def score_image(self, "Sorting by this method will be quicker next time") self._output_update_info = False - a_face = AlignedFace(np.array(alignments["landmarks_xy"], dtype="float32"), + a_face = AlignedFace(np.asarray(alignments["landmarks_xy"], dtype="float32"), image=image, centering="legacy", size=self._vgg_face.input_size, @@ -933,8 +933,8 @@ def sort(self) -> None: The original list, sorted for this metric """ logger.info("Sorting by ward linkage. This may take some time...") - preds = np.array([item[1] for item in self._result]) - indices = Cluster(np.array(preds), "ward", threshold=self._threshold)() + preds = np.asarray([item[1] for item in self._result]) + indices = Cluster(preds, "ward", threshold=self._threshold)() self._result = [(self._result[idx][0], float(score)) for idx, score in indices] def binning(self) -> list[list[str]]: diff --git a/tools/sort/sort_methods_aligned.py b/tools/sort/sort_methods_aligned.py index 8f0ff0b8ea..2bfc24140f 100644 --- a/tools/sort/sort_methods_aligned.py +++ b/tools/sort/sort_methods_aligned.py @@ -87,7 +87,7 @@ def score_image(self, "alignments file to generate this data.") raise FaceswapError(msg) - face = AlignedFace(np.array(alignments["landmarks_xy"], dtype="float32")) + face = AlignedFace(np.asarray(alignments["landmarks_xy"], dtype="float32")) if (not self._logged_lm_count_once and face.landmark_type == LandmarkType.LM_2D_4 and self.__class__.__name__ != "SortSize"):
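
Note for reviewers: a minimal standalone sketch of the NumPy semantics this rename relies on. It is illustrative only (plain NumPy, no faceswap code): np.asarray returns the input object itself when it is already an ndarray of a compatible dtype, np.array copies by default, and np.array_equal is a comparison helper rather than a constructor, so it stays outside the rename.

    import numpy as np

    data = np.ones((3, 3), dtype="float32")

    view = np.asarray(data)  # already a float32 ndarray: returned as-is, no copy
    copy = np.array(data)    # copy=True is the default: fresh buffer

    print(view is data)      # True  - zero-copy
    print(copy is data)      # False - independent copy

    # The trade-off: writing into the asarray result writes into the original.
    view[0, 0] = 9.0
    print(data[0, 0])        # 9.0

    # A requested dtype change forces a new array with either call.
    print(np.asarray(data, dtype="int32") is data)  # False

    # np.array_equal compares contents; there is no np.asarray_equal.
    print(np.array_equal(copy, data))  # False after the in-place edit above

The zero-copy behaviour is what makes the swap worthwhile on hot paths, but it also means any call site that mutates the returned array now mutates the caller's data; remaining np.array sites are worth auditing for that before converting them.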