Commit fa4b9c9: fix
enkilee committed Jan 24, 2025
1 parent a60afef
Showing 10 changed files with 16 additions and 23 deletions.
2 changes: 1 addition & 1 deletion CMakeLists.txt
@@ -711,7 +711,7 @@ set(CMAKE_C_FLAGS_RELWITHDEBINFO "-O3 -g -DNDEBUG")
add_definitions(-DPADDLE_DLL_EXPORT)

if(ON_INFER)
-# you can trun off the paddle fluid and inference lib by set ON_INFER=OFF
+# you can turn off the paddle fluid and inference lib by set ON_INFER=OFF
message(
STATUS "On inference mode, will take place some specific optimization.")
include(inference_lib)
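
(Per the comment above, the inference libraries are gated on the ON_INFER option; presumably a build configured with cmake -DON_INFER=OFF skips them. This is inferred from the comment itself, not verified against the rest of the build scripts.)
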
7 changes: 0 additions & 7 deletions _typos.toml
@@ -140,13 +140,6 @@ suppport = 'suppport'
SWTICH = 'SWTICH'
Swith = 'Swith'
sysyem = 'sysyem'
-Tring = 'Tring'
-tring = 'tring'
-tunning = 'tunning'
-TYPLE = 'TYPLE'
-trun = 'trun'
-tyep = 'tyep'
-tpye = 'tpye'
unsupport = 'unsupport'
upsupported = 'upsupported'
Unsupport = 'Unsupport'
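
Background for the deletions above: in the typos checker's configuration, an entry that maps a word to itself (for example, tring = 'tring') appears to whitelist that spelling so existing occurrences do not fail the check; removing the entries re-enables detection once the occurrences themselves are fixed, which is what the remaining files in this commit do.
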
@@ -30,7 +30,7 @@ TypeAdtTypeId GetTypeAdtTypeId(const pir::Type& type) {
}
#define RETURN_TYPE_TYPE_ID_IF_MATCH(cls) \
if (type.isa<cls>()) return ::common::AdtTypeId<cls>{};
-FOR_EACH_PIR_ALTERNATIVE_TYPLE(RETURN_TYPE_TYPE_ID_IF_MATCH)
+FOR_EACH_PIR_ALTERNATIVE_TYPE(RETURN_TYPE_TYPE_ID_IF_MATCH)
#undef RETURN_TYPE_TYPE_ID_IF_MATCH
return ::common::AdtTypeId<UnclassifiedType>{};
}
@@ -46,7 +46,7 @@ class SparseCsrTensorType;
} // namespace paddle::dialect

// clang-format off
-#define FOR_EACH_PIR_ALTERNATIVE_TYPLE(__macro) \
+#define FOR_EACH_PIR_ALTERNATIVE_TYPE(__macro) \
__macro(::pir::VectorType) \
__macro(::pir::DenseTensorType) \
__macro(::pir::BFloat16Type) \
@@ -76,7 +76,7 @@ class UnclassifiedType;
using TypeAdtTypeIdBase =
::common::AdtBaseTypeId<NullType,
#define MAKE_TYPE_ADT_TYPE_ID_ALTERNATIVE(cls) cls,
-FOR_EACH_PIR_ALTERNATIVE_TYPLE(
+FOR_EACH_PIR_ALTERNATIVE_TYPE(
MAKE_TYPE_ADT_TYPE_ID_ALTERNATIVE)
#undef MAKE_TYPE_ADT_TYPE_ID_ALTERNATIVE
UnclassifiedType>;
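
For readers unfamiliar with the pattern in the two hunks above: FOR_EACH_PIR_ALTERNATIVE_TYPE is an X-macro, a single list macro that is re-expanded with different per-item macros, which is why the misspelled name (TYPLE) had to be corrected both at the definition and at every expansion site. A minimal, self-contained sketch of the technique, using hypothetical type names rather than Paddle's real ones:

#include <iostream>

struct IntType {};
struct FloatType {};

// The list macro: invokes __macro once per alternative type.
#define FOR_EACH_ALTERNATIVE_TYPE(__macro) \
  __macro(IntType)                         \
  __macro(FloatType)

// Expand the list into one NameOf() overload per alternative.
#define DEFINE_NAME_OVERLOAD(cls) \
  const char* NameOf(cls) { return #cls; }
FOR_EACH_ALTERNATIVE_TYPE(DEFINE_NAME_OVERLOAD)
#undef DEFINE_NAME_OVERLOAD

int main() {
  std::cout << NameOf(IntType{}) << "\n";    // prints IntType
  std::cout << NameOf(FloatType{}) << "\n";  // prints FloatType
}

Because every expansion site names the list macro explicitly, a misspelling is either a hard compile error or, as here, harmless so long as it is misspelled consistently everywhere.
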
2 changes: 1 addition & 1 deletion paddle/cinn/ir/schedule/impl/loop_transformation.cc
@@ -357,7 +357,7 @@ Expr DyScheduleImpl::Fuse(const std::vector<Expr>& loops) {
std::string primitive = "Fuse";
std::ostringstream os;

VLOG(3) << "Tring to fuse:\n" << loops[0];
VLOG(3) << "Trying to fuse:\n" << loops[0];
std::vector<const ir::For*> for_nodes;
std::vector<Var> loop_vars;

12 changes: 6 additions & 6 deletions paddle/fluid/eager/utils.cc
@@ -292,7 +292,7 @@ std::vector<std::shared_ptr<egr::EagerVariable>> EagerUtils::TrySyncToVars(
tensor,
common::errors::Fatal(
"Tensor is null and cannot be copied. "
"We are tring to TrySyncToVars tensor from its "
"We are trying to TrySyncToVars tensor from its "
"shared_ptr, this error may indicate some outputs "
"are nullptr"));
res.emplace_back(TrySyncToVar(*tensor));
@@ -420,7 +420,7 @@ std::vector<paddle::Tensor> EagerUtils::GetOutputs(
out.get(),
common::errors::Fatal(
"Eager Tensor %s is null and cannot be copied. "
"We are tring to Get Output tensor from its "
"We are trying to Get Output tensor from its "
"shared_ptr, this error may indicate some outputs "
"are nullptr",
out->name()));
@@ -435,7 +435,7 @@ paddle::Tensor EagerUtils::GetOutput(
out.get(),
common::errors::Fatal(
"Eager Tensor %s is null and cannot be copied. We "
"are tring to Get Output tensor from its shared_ptr, "
"are trying to Get Output tensor from its shared_ptr, "
"this error may indicate output is nullptr",
out->name()));
return paddle::Tensor(out->GetTensorBase(), out->name());
@@ -446,7 +446,7 @@ void EagerUtils::GetOutput(const std::shared_ptr<EagerVariable>& out,
PADDLE_ENFORCE_NOT_NULL(
out_var,
common::errors::Fatal("Tensor is null and cannot be copied. "
"We are tring to OverwriteOutput from its "
"We are trying to OverwriteOutput from its "
"shared_ptr, this error may indicate some outputs "
"are nullptr"));
out_var->set_impl(out->GetTensorBase());
@@ -469,7 +469,7 @@ void EagerUtils::GetOutputs(
out_var[i],
common::errors::Fatal(
"Tensor is null and cannot be copied. "
"We are tring to OverwriteOutput from its "
"We are trying to OverwriteOutput from its "
"shared_ptr, this error may indicate some outputs "
"are nullptr"));
out_var[i]->set_impl(outs[i]->GetTensorBase());
@@ -486,7 +486,7 @@ void EagerUtils::GetOutputs(const std::shared_ptr<EagerVariable>& out,
PADDLE_ENFORCE_NOT_NULL(
out_var[0],
common::errors::Fatal("Tensor is null and cannot be copied. "
"We are tring to OverwriteOutput from its "
"We are trying to OverwriteOutput from its "
"shared_ptr, this error may indicate some outputs "
"are nullptr"));
out_var[0]->set_impl(out->GetTensorBase());
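
The recurring pattern in this file, checking a raw pointer obtained from a shared_ptr and failing with a descriptive Fatal message, can be sketched in isolation as follows. EnforceNotNull below is a hypothetical stand-in for PADDLE_ENFORCE_NOT_NULL, not Paddle's actual implementation:

#include <memory>
#include <stdexcept>
#include <string>

// Hypothetical analogue of PADDLE_ENFORCE_NOT_NULL: return the pointer if
// it is non-null, otherwise throw with a message explaining the likely cause.
template <typename T>
T* EnforceNotNull(T* ptr, const std::string& msg) {
  if (ptr == nullptr) throw std::runtime_error(msg);
  return ptr;
}

int main() {
  auto out = std::make_shared<int>(42);
  int* raw = EnforceNotNull(out.get(),
                            "Tensor is null and cannot be copied; "
                            "this may indicate some outputs are nullptr");
  return *raw == 42 ? 0 : 1;
}
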
@@ -722,7 +722,7 @@ bool PIRPrelnResidualBiasPluginDynamic::supportsFormatCombination(
PADDLE_ENFORCE_NOT_NULL(
in_out,
common::errors::InvalidArgument(
"The input of swish plugin shoule not be nullptr."));
"The input of swish plugin should not be nullptr."));

PADDLE_ENFORCE_LT(
pos,
@@ -172,7 +172,7 @@ int ProfileToGetBestConfig(
&all_func,
const GemmEpilogueAllParams &params,
OpType op_type) {
std::cout << "we are tunning for problem: [" << params.m << ", " << params.n
std::cout << "we are running for problem: [" << params.m << ", " << params.n
<< ", " << params.k << "]" << std::endl;

constexpr int WARMUP = 10;
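
The WARMUP constant in the surrounding context suggests the usual warm-up-then-measure profiling idiom. A rough sketch of that idiom follows; the helper name, counts, and structure are assumptions for illustration, not the actual CINN tuner:

#include <chrono>
#include <functional>
#include <iostream>

// Run the kernel for a few untimed warm-up iterations, then report the
// average wall-clock time per run over the measured repeats.
double AverageMs(const std::function<void()>& kernel,
                 int warmup = 10, int repeats = 100) {
  for (int i = 0; i < warmup; ++i) kernel();
  auto start = std::chrono::steady_clock::now();
  for (int i = 0; i < repeats; ++i) kernel();
  auto end = std::chrono::steady_clock::now();
  return std::chrono::duration<double, std::milli>(end - start).count() /
         repeats;
}

int main() {
  long long sink = 0;
  double ms = AverageMs([&] {
    for (int i = 0; i < 100000; ++i) sink += i;
  });
  std::cout << "avg ms per run: " << ms << " (sink=" << sink << ")\n";
}
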
4 changes: 2 additions & 2 deletions python/paddle/distributed/auto_parallel/static/mapper.py
@@ -35,13 +35,13 @@ def is_collective_comm_op(op):
"all_reduce",
"broadcast",
]
-reduce_tyep = [
+reduce_type = [
dist.ReduceOp.SUM,
dist.ReduceOp.MIN,
dist.ReduceOp.MAX,
dist.ReduceOp.PROD,
]
if op.type == "reduce" and op.attr("reduce_tyep") in reduce_tyep:
if op.type == "reduce" and op.attr("reduce_type") in reduce_type:
return True
if op.type in comm_list:
return True
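
Worth noting: unlike the comment and log-string fixes elsewhere in this commit, the second change in this hunk presumably affects behavior, since op.attr("reduce_tyep") queried a misspelled attribute name and the reduce-op branch could never have matched before the fix.
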
@@ -362,7 +362,7 @@ def backward_func(cost):
self.check_sparse_gradient_clip(place)

# raise typeError
-def test_tpyeError(self):
+def test_typeError(self):
# the type of optimizer(grad_clip=) must be an instance of GradientClipBase's derived class
with self.assertRaises(TypeError):
sgd_optimizer = paddle.optimizer.SGD(
