Update app.py
app.py CHANGED
@@ -1,64 +1,101 @@
-import gradio as gr
 import argparse
+from tabulate import tabulate

 def main():
-    parser = argparse.ArgumentParser(description='
-    parser.add_argument('--
-    parser.add_argument('--
-    parser.add_argument('--
-    parser.add_argument('--
-    parser.add_argument('--
+    parser = argparse.ArgumentParser(description='Your script description')
+    parser.add_argument('-g', '--num_gpu', type=int, default=1, help='Number of GPUs')
+    parser.add_argument('-p', '--prompt_sz', type=int, default=4096, help='Prompt size in tokens')
+    parser.add_argument('-r', '--response_sz', type=int, default=256, help='Response size in tokens')
+    parser.add_argument('-c', '--n_concurrent_req', type=int, default=10, help='Number of concurrent requests')
+    parser.add_argument('-w', '-cw', '--ctx_window', type=int, default=1024, help='Average context window')

     args = parser.parse_args()

-}
-… (the remaining deleted lines, old 14-64, are truncated in the rendered diff)
+    num_gpu = args.num_gpu
+    prompt_size = args.prompt_sz
+    response_size = args.response_sz
+    n_concurrent_request = args.n_concurrent_req
+    avg_context_window = args.ctx_window
+
+    # Print input
+    print(f" num_gpu = {num_gpu}, prompt_size = {prompt_size} tokens, response_size = {response_size} tokens")
+    print(f" n_concurrent_request = {n_concurrent_request}, avg_context_window = {avg_context_window} tokens")
+
+    # Define variables
+    gpu_specs = [
+        {"name": "A10", "fp16_tflops": 125, "memory_gb": 24, "memory_bandwidth_gbps": 600},
+        {"name": "A30", "fp16_tflops": 330, "memory_gb": 24, "memory_bandwidth_gbps": 933},
+        {"name": "L40", "fp16_tflops": 181, "memory_gb": 48, "memory_bandwidth_gbps": 864},
+        {"name": "L40s", "fp16_tflops": 362, "memory_gb": 48, "memory_bandwidth_gbps": 864},
+        {"name": "A100 40 GB", "fp16_tflops": 312, "memory_gb": 40, "memory_bandwidth_gbps": 1555},
+        {"name": "A100 40 GB SXM", "fp16_tflops": 312, "memory_gb": 40, "memory_bandwidth_gbps": 1555},
+        {"name": "A100 80 GB PCIe", "fp16_tflops": 312, "memory_gb": 80, "memory_bandwidth_gbps": 1935},
+        {"name": "A100 80 GB SXM", "fp16_tflops": 312, "memory_gb": 80, "memory_bandwidth_gbps": 2039},
+        {"name": "H100 PCIe", "fp16_tflops": 1513, "memory_gb": 80, "memory_bandwidth_gbps": 2000},
+        {"name": "H100 SXM", "fp16_tflops": 1979, "memory_gb": 80, "memory_bandwidth_gbps": 3350},
+        {"name": "H100 NVL", "fp16_tflops": 3958, "memory_gb": 188, "memory_bandwidth_gbps": 7800}
+        # Add or comment out GPU types as needed
+    ]
+
+    model_specs = [
+        {"name": "Llama-3-8B", "params_billion": 8, "d_model": 4096, "n_heads": 32, "n_layers": 32, "max_context_window": 8192, "d_head": 128},
+        {"name": "Llama-3-70B", "params_billion": 70, "d_model": 8192, "n_heads": 64, "n_layers": 80, "max_context_window": 8192, "d_head": 128},
+        {"name": "Llama-3.1-8B", "params_billion": 8, "d_model": 4096, "n_heads": 32, "n_layers": 32, "max_context_window": 131072, "d_head": 128},
+        {"name": "Llama-3.1-70B", "params_billion": 70, "d_model": 8192, "n_heads": 64, "n_layers": 80, "max_context_window": 131072, "d_head": 128},
+        {"name": "Mistral-7B-v0.3", "params_billion": 7, "d_model": 4096, "n_heads": 32, "n_layers": 32, "max_context_window": 32768, "d_head": 128},
+        {"name": "Falcon-7B", "params_billion": 7, "d_model": 4544, "n_heads": 71, "n_layers": 32, "max_context_window": 2048, "d_head": 64},
+        {"name": "Falcon-40B", "params_billion": 40, "d_model": 8192, "n_heads": 128, "n_layers": 60, "max_context_window": 2048, "d_head": 64},
+        {"name": "Falcon-180B", "params_billion": 180, "d_model": 14848, "n_heads": 232, "n_layers": 80, "max_context_window": 2048, "d_head": 64}
+        # Add or comment out model specifications as needed
+    ]
+
+    BYTES_IN_GB = 1_073_741_824  # 2**30 bytes, i.e. 1 GiB
+
+    def calc_kv_cache_size_per_token(n_layers, d_model):
+        # 2 (K and V) * 2 bytes (FP16) * n_layers * d_model, converted to GiB per token
+        return 2 * 2 * n_layers * d_model / BYTES_IN_GB
+
+    def calc_memory_footprint(model_spec, n_concurrent_request, avg_context_window):
+        kv_cache_size_per_token = calc_kv_cache_size_per_token(model_spec["n_layers"], model_spec["d_model"])
+        # KV cache for all concurrent requests plus FP16 weights (2 GB per billion parameters)
+        target_gpu_mem = kv_cache_size_per_token * avg_context_window * n_concurrent_request + model_spec["params_billion"] * 2
+        return target_gpu_mem
+
+    print("\n******************** Estimate LLM Memory Footprint ********************")
+    memory_footprint_table = []
+    for model_spec in model_specs:
+        kv_cache_size_per_token = calc_kv_cache_size_per_token(model_spec["n_layers"], model_spec["d_model"])
+        memory_footprint = calc_memory_footprint(model_spec, n_concurrent_request, avg_context_window)
+        memory_footprint_table.append([model_spec['name'], f"{kv_cache_size_per_token:.6f} GiB/token", f"{memory_footprint:.2f} GB"])
+    print(tabulate(memory_footprint_table, headers=['Model', 'KV Cache Size per Token', 'Memory Footprint'], tablefmt='orgtbl'))
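+    # Sanity check of the formula above, assuming Llama-3-8B and the default arguments:
+    #   KV cache/token = 2 * 2 * 32 * 4096 / 2**30 ≈ 0.000488 GiB (~0.5 MiB)
+    #   footprint ≈ 0.000488 * 1024 ctx * 10 reqs + 8 * 2 ≈ 5 + 16 = 21 GB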
+
+    def calc_kv_cache_tokens(num_gpu, gpu_memory_gb, model_params_billion, kv_cache_size):
+        # Memory left after loading FP16 weights, divided by the per-token KV cache size
+        result = (num_gpu * gpu_memory_gb - 2 * model_params_billion) / kv_cache_size
+        return result if result >= 0 else "OOM"
+
+    def calc_prefill_time_per_token(num_gpu, model_params_billion, fp16_tflops):
+        # 2 FLOPs per parameter per token; billions / TFLOPS yields ms per token (compute-bound)
+        result = (2 * model_params_billion / num_gpu) / fp16_tflops
+        return result if result >= 0 else "OOM"
+
+    def calc_generation_time_per_token(num_gpu, model_params_billion, memory_bandwidth_gbps):
+        # All weights (2 bytes per parameter) are read per generated token; result in ms (bandwidth-bound)
+        result = (2 * model_params_billion / num_gpu) / memory_bandwidth_gbps * 1000
+        return result if result >= 0 else "OOM"
+
+    def calc_estimated_response_time(prefill_time, generation_time, prompt_size, response_size):
+        if isinstance(prefill_time, str) or isinstance(generation_time, str):  # propagate "OOM"
+            return "OOM"
+        return (prompt_size * prefill_time + response_size * generation_time) / 1000  # convert ms to seconds
+
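+    # Sanity check, assuming Llama-3-8B on one A10 (125 TFLOPS FP16, 600 GB/s):
+    #   prefill ≈ 2 * 8 / 125 = 0.128 ms/token
+    #   generation ≈ 2 * 8 / 600 * 1000 ≈ 26.7 ms/token
+    #   response ≈ (4096 * 0.128 + 256 * 26.7) / 1000 ≈ 7.4 s with the default prompt/response sizes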
+    print("\n******************** Estimate LLM Capacity and Latency ********************")
+    capacity_latency_table = []
+    for model in model_specs:
+        # print(f"Model: {model['name']} ({model['params_billion']}B parameters)")
+        kv_cache_size = calc_kv_cache_size_per_token(model['n_layers'], model['d_model'])
+        for gpu in gpu_specs:
+            kv_cache_tokens = calc_kv_cache_tokens(num_gpu, gpu['memory_gb'], model['params_billion'], kv_cache_size)
+            prefill_time_per_token = calc_prefill_time_per_token(num_gpu, model['params_billion'], gpu['fp16_tflops'])
+            generation_time_per_token = calc_generation_time_per_token(num_gpu, model['params_billion'], gpu['memory_bandwidth_gbps'])
+            estimated_response_time = calc_estimated_response_time(prefill_time_per_token, generation_time_per_token, prompt_size, response_size)
+            capacity_latency_table.append([model['name'], gpu['name'], f"{kv_cache_tokens}", f"{prefill_time_per_token:.3f} ms", f"{generation_time_per_token:.3f} ms", f"{estimated_response_time:.1f} s"])
+    print(tabulate(capacity_latency_table, headers=['Model', 'GPU', 'KV Cache Tokens', 'Prefill Time', 'Generation Time', 'Estimated Response Time'], tablefmt='orgtbl'))
+
+if __name__ == '__main__':
+    main()
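For reference, the script depends only on tabulate (pip install tabulate) and can be run with the flags defined in the parser above; the values here are illustrative:

    python app.py --num_gpu 2 --prompt_sz 4096 --response_sz 512 --n_concurrent_req 10 --ctx_window 2048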