diff --git a/format_out/impl.py b/format_out/impl.py
index 5638899f..61f5f5c3 100644
--- a/format_out/impl.py
+++ b/format_out/impl.py
@@ -107,19 +107,6 @@ def gen_number_v2(self, max_new_tokens=None, prefix_regex=None):
             retry_count=self.default_retry_count,
         )
 
-    def gen_number_v2(self, max_new_tokens=None, prefix_regex=None):
-        """
-        Includes support for fractions.
-        """
-        if max_new_tokens is None:
-            max_new_tokens = 100
-        return self.generate(
-            r"-?(\d+(\.\d+)?|\d+/\d+|\d+/\d+\.\d+)",
-            max_new_tokens=max_new_tokens,
-            prefix_regex=prefix_regex,
-            retry_count=self.default_retry_count,
-        )
-
     def gen_json_object(self, obj: BaseModel, max_new_tokens=512, prefix_regex=None, whitespace_pattern=r"[\s]{0,12}"):
         json_schema = obj.model_json_schema()
         regex_str = build_regex_from_schema(json.dumps(json_schema), whitespace_pattern=whitespace_pattern)
diff --git a/lightllm/server/httpserver/manager.py b/lightllm/server/httpserver/manager.py
index 0c905e5d..cec871d8 100644
--- a/lightllm/server/httpserver/manager.py
+++ b/lightllm/server/httpserver/manager.py
@@ -113,8 +113,6 @@ async def generate(
 
         sampling_params.stop_sentences_to_token_ids(self.tokenizer)
 
-        sampling_params.stop_sentences_to_token_ids(self.tokenizer)
-
         # Statistics variables
         start_time = time.time()
         out_token_counter = 0
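
For context on the first hunk: the deleted `gen_number_v2` is a duplicate definition, and its regex is what gives the method fraction support. Below is a minimal sketch, using plain `re` outside the lightllm codebase, of what that pattern accepts; the `NUMBER_PATTERN` name and the test strings are illustrative only, not part of the repo.

```python
import re

# Fraction-aware number pattern from the removed duplicate gen_number_v2.
NUMBER_PATTERN = re.compile(r"-?(\d+(\.\d+)?|\d+/\d+|\d+/\d+\.\d+)")

# fullmatch() requires the entire string to be one number, decimal, or fraction.
for text in ["42", "-3.14", "7/8", "1/2.5", "abc"]:
    print(text, bool(NUMBER_PATTERN.fullmatch(text)))
# Expected output: 42/-3.14/7/8/1/2.5 -> True, abc -> False
```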