// headers assumed from the ggml examples tree: common.h / common-ggml.h provide
// gpt_vocab, ggml_parse_ftype, ggml_print_ftypes and ggml_common_quantize_0
#include "ggml/ggml.h"

#include "common.h"
#include "common-ggml.h"

#include <cstdint>
#include <cstdio>
#include <fstream>
#include <string>
#include <vector>

// default hparams (StableLM 3B)
struct gpt_neox_hparams {
    int32_t n_vocab = 50257;
    int32_t n_ctx   = 4096;
    int32_t n_embd  = 4096;
    int32_t n_head  = 32;
    int32_t n_layer = 16;
    int32_t n_rot   = 32; // 0.25 * (n_embd / n_head)
    int32_t par_res = 1;  // 1 = true, 0 = false
    int32_t ftype   = 1;
};
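
// The single-file ggml model handled below is laid out as (inferred from the reads
// and writes in gpt_neox_model_quantize): a uint32 magic, eight int32 hparams in the
// order of the struct above, n_vocab length-prefixed vocab entries, then tensor data.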

// quantize a model
bool gpt_neox_model_quantize(const std::string & fname_inp, const std::string & fname_out, ggml_ftype ftype) {
    gpt_vocab vocab;

    printf("%s: loading model from '%s'\n", __func__, fname_inp.c_str());

    auto finp = std::ifstream(fname_inp, std::ios::binary);
    if (!finp) {
        fprintf(stderr, "%s: failed to open '%s' for reading\n", __func__, fname_inp.c_str());
        return false;
    }

    auto fout = std::ofstream(fname_out, std::ios::binary);
    if (!fout) {
        fprintf(stderr, "%s: failed to open '%s' for writing\n", __func__, fname_out.c_str());
        return false;
    }

    // verify magic
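    // (0x67676d6c below is the ASCII string "ggml"; it is copied to the output unchanged)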
    {
        uint32_t magic;
        finp.read((char *) &magic, sizeof(magic));
        if (magic != 0x67676d6c) {
            fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname_inp.c_str());
            return false;
        }

        fout.write((char *) &magic, sizeof(magic));
    }

    gpt_neox_hparams hparams;

    // load hparams
    {
        finp.read((char *) &hparams.n_vocab, sizeof(hparams.n_vocab));
        finp.read((char *) &hparams.n_ctx,   sizeof(hparams.n_ctx));
        finp.read((char *) &hparams.n_embd,  sizeof(hparams.n_embd));
        finp.read((char *) &hparams.n_head,  sizeof(hparams.n_head));
        finp.read((char *) &hparams.n_layer, sizeof(hparams.n_layer));
        finp.read((char *) &hparams.n_rot,   sizeof(hparams.n_rot));
        finp.read((char *) &hparams.par_res, sizeof(hparams.par_res));
        finp.read((char *) &hparams.ftype,   sizeof(hparams.ftype));
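
        // the stored ftype packs the quantization version on top of the base type:
        //   stored_ftype = qntvr * GGML_QNT_VERSION_FACTOR + base_ftype
        // so the source version falls out of an integer division and the destination
        // value is rebuilt against the current GGML_QNT_VERSION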
        const int32_t qntvr_src =    hparams.ftype / GGML_QNT_VERSION_FACTOR;
        const int32_t ftype_dst = GGML_QNT_VERSION * GGML_QNT_VERSION_FACTOR + ftype;

        printf("%s: n_vocab     = %d\n", __func__, hparams.n_vocab);
        printf("%s: n_ctx       = %d\n", __func__, hparams.n_ctx);
        printf("%s: n_embd      = %d\n", __func__, hparams.n_embd);
        printf("%s: n_head      = %d\n", __func__, hparams.n_head);
        printf("%s: n_layer     = %d\n", __func__, hparams.n_layer);
        printf("%s: par_res     = %d\n", __func__, hparams.par_res);
        printf("%s: ftype (src) = %d\n", __func__, hparams.ftype);
        printf("%s: qntvr (src) = %d\n", __func__, qntvr_src);
        printf("%s: ftype (dst) = %d\n", __func__, ftype_dst);
        printf("%s: qntvr (dst) = %d\n", __func__, GGML_QNT_VERSION);
        fout.write((char *) &hparams.n_vocab, sizeof(hparams.n_vocab));
        fout.write((char *) &hparams.n_ctx,   sizeof(hparams.n_ctx));
        fout.write((char *) &hparams.n_embd,  sizeof(hparams.n_embd));
        fout.write((char *) &hparams.n_head,  sizeof(hparams.n_head));
        fout.write((char *) &hparams.n_layer, sizeof(hparams.n_layer));
        fout.write((char *) &hparams.n_rot,   sizeof(hparams.n_rot));
        fout.write((char *) &hparams.par_res, sizeof(hparams.par_res));
        fout.write((char *) &ftype_dst,       sizeof(ftype_dst));
    }

    // load vocab
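    // each entry is stored as a uint32 length followed by that many raw token bytes;
    // entries are copied to the output verbatim while the lookup maps are rebuilt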
    {
        const int32_t n_vocab = hparams.n_vocab;

        std::string word;
        for (int i = 0; i < n_vocab; i++) {
            uint32_t len;
            finp.read ((char *) &len, sizeof(len));
            fout.write((char *) &len, sizeof(len));

            word.resize(len);
            finp.read ((char *) word.data(), len);
            fout.write((char *) word.data(), len);

            vocab.token_to_id[word] = i;
            vocab.id_to_token[i] = word;
        }
    }

    // regexes of tensor names to be quantized
    const std::vector<std::string> to_quant = {
        ".*weight",
    };
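
    // only tensors whose names match one of the regexes above are offered to the common
    // quantizer as candidates; the empty list passed as the last argument below means no
    // tensor is explicitly excluded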
    if (!ggml_common_quantize_0(finp, fout, ftype, to_quant, {})) {
        fprintf(stderr, "%s: failed to quantize model '%s'\n", __func__, fname_inp.c_str());
        return false;
    }

    finp.close();
    fout.close();

    return true;
}

// usage:
//  ./gpt-neox-quantize models/stablelm2-117M/ggml-model.bin models/stablelm2-117M/ggml-model-quant.bin type
//
int main(int argc, char ** argv) {
    ggml_time_init();

    if (argc != 4) {
        fprintf(stderr, "usage: %s model-f32.bin model-quant.bin type\n", argv[0]);
        ggml_print_ftypes(stderr);
        return 1;
    }

    // needed to initialize f16 tables
    {
        struct ggml_init_params params = { 0, NULL, false };
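        // the fields are { mem_size, mem_buffer, no_alloc }: a zero-sized, unallocated
        // context is enough to trigger ggml's one-time setup of its f16 tables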
        struct ggml_context * ctx = ggml_init(params);
        ggml_free(ctx);
    }

    const std::string fname_inp = argv[1];
    const std::string fname_out = argv[2];
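
    // the type argument is handed to the common ggml_parse_ftype helper; the values it
    // accepts correspond to the ftypes that ggml_print_ftypes lists in the usage path above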
    const ggml_ftype ftype = ggml_parse_ftype(argv[3]);

    const int64_t t_main_start_us = ggml_time_us();

    int64_t t_quantize_us = 0;

    // load and quantize the model
    {
        const int64_t t_start_us = ggml_time_us();

        if (!gpt_neox_model_quantize(fname_inp, fname_out, ggml_ftype(ftype))) {
            fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str());
            return 1;
        }

        t_quantize_us = ggml_time_us() - t_start_us;
    }

    // report timing
    {
        const int64_t t_main_end_us = ggml_time_us();

        printf("\n");
        printf("%s: quantize time = %8.2f ms\n", __func__, t_quantize_us/1000.0f);
        printf("%s:    total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f);
    }

    return 0;
}