Your current environment
Ascend 910B4, 2 nodes
Versions:
ucm 0.5.0rc1
vllm-ascend 0.18.0rc1
Model: GLM-4.7-w8a8-with-float-mtp
🐛 Describe the bug
1. vLLM launch commands:
node0:
#!/bin/sh
# local_ip is this node's IP address (obtained via ifconfig);
# nic_name is the network interface corresponding to local_ip
local_ip="xxxx"
nic_name="bond0"
export HCCL_IF_IP=$local_ip
export GLOO_SOCKET_IFNAME=$nic_name
export TP_SOCKET_IFNAME=$nic_name
export HCCL_SOCKET_IFNAME=$nic_name
export HCCL_BUFFSIZE=512
export OMP_PROC_BIND=false
export OMP_NUM_THREADS=1
export PYTORCH_NPU_ALLOC_CONF=expandable_segments:True
export HCCL_OP_EXPANSION_MODE=AIV
export VLLM_ASCEND_BALANCE_SCHEDULING=1
export VLLM_ASCEND_ENABLE_TOPK_OPTIMIZE=1
export VLLM_ASCEND_ENABLE_FLASHCOMM1=1
export VLLM_ASCEND_ENABLE_FUSED_MC2=1
vllm serve Eco-Tech/GLM-4.7-W8A8-floatmtp \
--host 0.0.0.0 \
--port 8004 \
--data-parallel-size 2 \
--data-parallel-size-local 1 \
--data-parallel-start-rank 0 \
--data-parallel-address $local_ip \
--data-parallel-rpc-port 13389 \
--tensor-parallel-size 8 \
--enable-expert-parallel \
--seed 1024 \
--max-model-len 140000 \
--max-num-batched-tokens 8192 \
--max-num-seqs 16 \
--async-scheduling \
--quantization ascend \
--trust-remote-code \
--gpu-memory-utilization 0.9 \
--enable-auto-tool-choice \
--reasoning-parser glm45 \
--tool-call-parser glm47 \
--served-model-name glm47 \
--block-size 128 \
--kv-transfer-config '{"kv_connector": "UCMConnector", "kv_connector_module_path": "ucm.integration.vllm.ucm_connector", "kv_role": "kv_both", "kv_connector_extra_config": {"UCM_CONFIG_FILE": "/workspace/unified-cache-management/examples/ucm_config_example.yaml"}}' \
--speculative-config '{"num_speculative_tokens": 3, "model":"Eco-Tech/GLM-4.7-W8A8-floatmtp", "method":"mtp"}' \
--compilation-config '{"cudagraph_capture_sizes": [1,2,4,8,16,32,64,128,256,512], "cudagraph_mode": "FULL_DECODE_ONLY"}' \
--additional-config '{"enable_shared_expert_dp": true, "ascend_fusion_config": {"fusion_ops_gmmswigluquant": false}}'
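Before launching, it can be worth confirming on each node that nic_name really is the interface that owns local_ip (a quick check, assuming standard iproute2 tooling):
# Optional sanity check: the interface named in nic_name should own local_ip
ip -4 addr show "$nic_name" | grep -F "$local_ip"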
node1:
#!/bin/sh
# local_ip is this node's IP address (obtained via ifconfig);
# nic_name is the network interface corresponding to local_ip
local_ip="xxxx"
node0_ip="xxxx"  # same as local_ip on node 0
nic_name="bond0"
export HCCL_IF_IP=$local_ip
export GLOO_SOCKET_IFNAME=$nic_name
export TP_SOCKET_IFNAME=$nic_name
export HCCL_SOCKET_IFNAME=$nic_name
export HCCL_BUFFSIZE=512
export OMP_PROC_BIND=false
export OMP_NUM_THREADS=1
export PYTORCH_NPU_ALLOC_CONF=expandable_segments:True
export HCCL_OP_EXPANSION_MODE=AIV
export VLLM_ASCEND_BALANCE_SCHEDULING=1
export VLLM_ASCEND_ENABLE_TOPK_OPTIMIZE=1
export VLLM_ASCEND_ENABLE_FLASHCOMM1=1
export VLLM_ASCEND_ENABLE_FUSED_MC2=1
vllm serve Eco-Tech/GLM-4.7-W8A8-floatmtp \
--host 0.0.0.0 \
--port 8004 \
--headless \
--data-parallel-size 2 \
--data-parallel-size-local 1 \
--data-parallel-start-rank 1 \
--data-parallel-address $node0_ip \
--data-parallel-rpc-port 13389 \
--tensor-parallel-size 8 \
--enable-expert-parallel \
--seed 1024 \
--max-model-len 140000 \
--max-num-batched-tokens 8192 \
--max-num-seqs 16 \
--async-scheduling \
--quantization ascend \
--trust-remote-code \
--gpu-memory-utilization 0.9 \
--enable-auto-tool-choice \
--reasoning-parser glm45 \
--tool-call-parser glm47 \
--served-model-name glm47 \
--block-size 128 \
--kv-transfer-config '{"kv_connector": "UCMConnector", "kv_connector_module_path": "ucm.integration.vllm.ucm_connector", "kv_role": "kv_both", "kv_connector_extra_config": {"UCM_CONFIG_FILE": "/workspace/unified-cache-management/examples/ucm_config_example.yaml"}}' \
--speculative-config '{"num_speculative_tokens": 3, "model":"Eco-Tech/GLM-4.7-W8A8-floatmtp", "method":"mtp"}' \
--compilation-config '{"cudagraph_capture_sizes": [1,2,4,8,16,32,64,128,256,512], "cudagraph_mode": "FULL_DECODE_ONLY"}' \
--additional-config '{"enable_shared_expert_dp": true, "ascend_fusion_config": {"fusion_ops_gmmswigluquant": false}}'
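Once both nodes are up, the deployment can be sanity-checked with a single short request against the OpenAI-compatible endpoint on node0 (a minimal sketch; <node0_ip> is a placeholder for node0's address):
# Single request against the server started above
# (served model name "glm47", port 8004, as configured)
curl -s http://<node0_ip>:8004/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{"model": "glm47", "messages": [{"role": "user", "content": "hello"}], "max_tokens": 16}'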
2. UCM config:
ucm_connectors:
  - ucm_connector_name: "UcmPipelineStore"
    ucm_connector_config:
      store_pipeline: "Cache|Posix"
      storage_backends: "/mnt/ucm"
      io_direct: false
      timeout_ms: 60000
      # When you use UcmNfsStore, you should set enable_event_sync to false.
      enable_event_sync: true
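Because the failures below look I/O-related, it may also help to record what backs the storage_backends path (a sketch with standard tools; /mnt/ucm is taken from the config above):
# What filesystem backs the UCM store, and how full is it?
df -hT /mnt/ucm
mount | grep /mnt/ucm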
3. Question:
It runs fine under normal conditions, but timeouts occur once concurrency increases to 10+ with long sequences (120K tokens).
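The pattern can be approximated from the shell like this (a rough sketch, not our exact load generator; <node0_ip> is a placeholder and payload.json stands in for a completion request carrying a ~120K-token prompt):
# Fire 12 concurrent long-prompt requests and print only the HTTP status codes
for i in $(seq 1 12); do
  curl -s -o /dev/null -w "%{http_code}\n" \
    -H "Content-Type: application/json" \
    -d @payload.json \
    http://<node0_ip>:8004/v1/completions &
done
wait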
When we additionally set:
use_layerwise: true
the error is:
The disk read/write speed:
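For comparison, sequential throughput on the backend path can be measured directly (a sketch using dd; sizes are arbitrary, and the direct flags bypass the page cache so the numbers reflect the disk):
# Sequential write then read on the UCM storage path, bypassing the page cache
dd if=/dev/zero of=/mnt/ucm/ddtest bs=1M count=4096 oflag=direct
dd if=/mnt/ucm/ddtest of=/dev/null bs=1M iflag=direct
rm -f /mnt/ucm/ddtest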
