Skip to content
This repository has been archived by the owner on Dec 20, 2024. It is now read-only.

Commit

Permalink
chore: add licences
Browse files Browse the repository at this point in the history
  • Loading branch information
theissenhelen committed May 29, 2024
1 parent b4d8e52 commit 544885e
Show file tree
Hide file tree
Showing 2 changed files with 46 additions and 1 deletion.
45 changes: 45 additions & 0 deletions src/anemoi/models/distributed/graph.py
Original file line number Diff line number Diff line change
Expand Up @@ -165,6 +165,21 @@ def backward(ctx, grad_output):
class _ReduceShardParallelSection(torch.autograd.Function):
"""All-reduce and shard the input from the parallel section."""

# Modified from
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

@staticmethod
def forward(ctx, input_, dim_, shapes_, mgroup_):
ctx.dim = dim_
Expand All @@ -190,6 +205,21 @@ def backward(ctx, grad_output):
class _ShardParallelSection(torch.autograd.Function):
    """Split the input and keep only the chunk relevant to the rank."""

# Modified from
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

@staticmethod
def forward(ctx, input_, dim_, shapes_, gather_in_backward_, mgroup_):
ctx.dim = dim_
Expand Down Expand Up @@ -218,6 +248,21 @@ def backward(ctx, grad_output):
class _GatherParallelSection(torch.autograd.Function):
"""Gather the input from parallel section and concatenate."""

# Modified from
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

@staticmethod
def forward(ctx, input_, dim_, shapes_, mgroup_):
ctx.dim = dim_
Expand Down
2 changes: 1 addition & 1 deletion src/anemoi/models/interface/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ def _build_model(self) -> None:
self.pre_processors = Processors(processors)
self.post_processors = Processors(processors, inverse=True)

# Instantiate the model
# Instantiate the model (Can be generalised to other models in the future, here we use AnemoiModelEncProcDec)
self.model = AnemoiModelEncProcDec(
config=self.config, data_indices=self.data_indices, graph_data=self.graph_data
)
Expand Down

0 comments on commit 544885e

Please sign in to comment.