default to get data_type startargs from config.json and fix deepseekv… #589

Merged
1 commit merged on Oct 28, 2024
@@ -177,6 +177,9 @@ def context_attention_fwd(
     else:
         BLOCK = 128 if not TESLA else 64
 
+    if q_nope.dtype == torch.float32:
+        BLOCK = BLOCK // 4
+
     sm_scale = softmax_scale
     batch, head = b_seq_len.shape[0], q_nope.shape[1]
     kv_group_num = q_nope.shape[1]  # for deepseekv2 the group is the number of q heads, similar to MQA
@@ -370,6 +373,9 @@ def context_attention_fwd_no_prompt_cache(
     else:
         BLOCK = 128 if not TESLA else 64
 
+    if q_nope.dtype == torch.float32:
+        BLOCK = BLOCK // 4
+
     sm_scale = softmax_scale
     batch, head = b_seq_len.shape[0], q_nope.shape[1]
     kv_group_num = q_nope.shape[1]
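Both kernels get the same guard: when the query tensor is float32, the Triton tile size is cut to a quarter, since fp32 tiles take more shared memory and registers than the fp16/bf16 tiles the original BLOCK values were tuned for. Below is a minimal standalone sketch of that selection logic; the helper name `choose_block_size` and its arguments are illustrative, not part of this diff.

```python
import torch


def choose_block_size(q_dtype: torch.dtype, tesla: bool) -> int:
    # Baseline tile size used by the context-attention kernels; smaller on Tesla GPUs.
    block = 128 if not tesla else 64
    # fp32 elements are wider, so shrink the tile to stay within SRAM/register budgets.
    if q_dtype == torch.float32:
        block = block // 4
    return block


print(choose_block_size(torch.float16, tesla=False))  # 128
print(choose_block_size(torch.float32, tesla=False))  # 32
print(choose_block_size(torch.float32, tesla=True))   # 16
```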
8 changes: 7 additions & 1 deletion lightllm/server/api_server.py
@@ -449,7 +449,7 @@ def make_argument_parser() -> argparse.ArgumentParser:
         "--data_type",
         type=str,
         choices=["fp16", "float16", "bf16", "bfloat16", "fp32", "float32"],
-        default="float16",
+        default=None,
         help="the data type of the model weight",
     )
     parser.add_argument("--return_all_prompt_logprobs", action="store_true", help="return all prompt tokens logprobs")
@@ -562,6 +562,12 @@ def main():
 
     args.eos_id = get_eos_token_ids(args.model_dir)
 
+    if args.data_type is None:
+        from lightllm.utils.config_utils import get_dtype
+
+        args.data_type = get_dtype(args.model_dir)
+        assert args.data_type in ["fp16", "float16", "bf16", "bfloat16", "fp32", "float32"]
+
     logger.info(f"all start args:{args}")
 
     can_use_ports = alloc_can_use_network_port(num=6 + args.tp, used_nccl_port=args.nccl_port)
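With the default changed to None, a server started without --data_type now follows the dtype recorded in the checkpoint's config.json instead of silently assuming float16; an explicit --data_type still wins, since the fallback only fires when the parsed argument is unset. A hedged sketch of that resolution order (the model path is a placeholder):

```python
from lightllm.utils.config_utils import get_dtype

model_dir = "/models/DeepSeek-V2-Lite"  # placeholder path to a local checkpoint
data_type = None                        # i.e. --data_type was not given on the command line

# Mirror of the new block in main(): fall back to config.json only when the flag is unset.
if data_type is None:
    data_type = get_dtype(model_dir)    # reads "torch_dtype", e.g. "bfloat16"
assert data_type in ["fp16", "float16", "bf16", "bfloat16", "fp32", "float32"]
print(data_type)
```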
13 changes: 13 additions & 0 deletions lightllm/utils/config_utils.py
@@ -1,5 +1,8 @@
 import json
 import os
+from lightllm.utils.log_utils import init_logger
+
+logger = init_logger(__name__)
 
 
 def get_config_json(model_path: str):
@@ -16,3 +19,13 @@ def get_eos_token_ids(model_path: str):
     if isinstance(eos_token_id, list):
         return eos_token_id
     assert False, "error eos_token_id format in config.json"
+
+
+def get_dtype(model_path: str):
+    config_json = get_config_json(model_path)
+    try:
+        torch_dtype = config_json["torch_dtype"]
+        return torch_dtype
+    except:
+        logger.warning("torch_dtype not in config.json, use float16 as default")
+        return "float16"