wang2vec: move to the right position
diff --git a/cngram2vec.c b/cngram2vec.c
new file mode 100644
index 0000000..266cf76
--- /dev/null
+++ b/cngram2vec.c
@@ -0,0 +1,1442 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+#include <pthread.h>
+#include <time.h>  // clock(), clock_t, CLOCKS_PER_SEC
+
+#define MAX_STRING 100
+#define EXP_TABLE_SIZE 1000
+#define MAX_EXP 6
+#define MAX_SENTENCE_LENGTH 1000
+#define MAX_CODE_LENGTH 40
+
+const int vocab_hash_size = 30000000; // Maximum 30M * 0.7 = 21M words in the vocabulary
+
+typedef float real; // Precision of float numbers
+
+struct vocab_word {
+ long long cn;
+ int *point;
+ char *word, *code, codelen;
+};
+
+char train_file[MAX_STRING], output_file[MAX_STRING];
+char save_vocab_file[MAX_STRING], read_vocab_file[MAX_STRING];
+struct vocab_word *vocab;
+int binary = 0, type = 1, debug_mode = 2, window = 5, min_count = 5, num_threads = 12, min_reduce = 1;
+int *vocab_hash;
+long long vocab_max_size = 1000, vocab_size = 0, layer1_size = 100;
+long long train_words = 0, word_count_actual = 0, iter = 5, file_size = 0, classes = 0;
+real alpha = 0.025, starting_alpha, sample = 1e-3;
+real *syn0, *syn1, *syn1neg, *syn1nce, *expTable;
+clock_t start;
+
+real *syn1_window, *syn1neg_window, *syn1nce_window;
+int w_offset, window_layer_size;
+
+int window_hidden_size = 500;
+real *syn_window_hidden, *syn_hidden_word, *syn_hidden_word_neg, *syn_hidden_word_nce;
+
+int hs = 0, negative = 5;
+const int table_size = 1e8;
+int *table;
+
+// contrastive negative sampling
+char negative_classes_file[MAX_STRING];
+int *word_to_group;
+int *group_to_table; //group_size*table_size
+int class_number;
+
+//nce
+real* noise_distribution;
+int nce = 10;
+
+//param caps
+real CAP_VALUE = 50;
+int cap = 0;
+
+// char models
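+// Character n-gram model state: the boundary marker used to pad word edges,
+// the fallback token for unseen n-grams, the n-gram length, the n-gram
+// embedding table (syn0_cngram) and its vocabulary.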
+char boundToken = 'Z';
+char *unkNgramToken = "ZZZ";
+int cngram_size = 6;
+real *syn0_cngram;
+long long cngram_vocab_size = 0;
+struct vocab_word *cngram_vocab;
+int *cngram_vocab_hash;
+long long cngram_vocab_max_size = 1000;
+char extra_vocab_file[MAX_STRING];
+long long maxNgramSize = 1000000;
+
+// Returns hash value of a word
+int GetWordHash(char *word) {
+ unsigned long long a, hash = 0;
+ for (a = 0; a < strlen(word); a++) hash = hash * 257 + word[a];
+ hash = hash % vocab_hash_size;
+ return hash;
+}
+
+// Searches the character n-gram vocabulary; returns the n-gram's position, or -1 if it is not found
+int SearchCNgramVocab(char *ngram) {
+ unsigned int hash = GetWordHash(ngram);
+ while (1) {
+ if (cngram_vocab_hash[hash] == -1) return -1;
+ if (!strcmp(ngram, cngram_vocab[cngram_vocab_hash[hash]].word)) return cngram_vocab_hash[hash];
+ hash = (hash + 1) % vocab_hash_size;
+ }
+ return -1;
+}
+
+// char functions
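+// Accumulates the embedding of a single character n-gram into 'output';
+// n-grams not seen when the vocabulary was built fall back to the special
+// unknown-n-gram token ("ZZZ").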
+void ForwardCNgramWordNgram(real *output, char *ngram){
+ long long a;
+ int index = SearchCNgramVocab(ngram);
+ if (index == -1) {index = SearchCNgramVocab(unkNgramToken);}
+ long long startIndex = layer1_size * index;
+ for (a = 0; a < layer1_size; a++){
+ output[a] += syn0_cngram[startIndex + a];
+ }
+}
+
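+// Composes a word vector as the average of the embeddings of its character
+// n-grams: every interior n-gram of length cngram_size, plus one
+// boundary-padded prefix and one boundary-padded suffix n-gram.
+// E.g. with cngram_size = 6, "jumped" contributes the interior n-gram
+// "jumped" plus the prefix "Zjumpe" and the suffix "umpedZ"; the shorter
+// word "cats" has no interior n-grams and contributes only "Zcats" and
+// "catsZ". The accumulated sum is divided by (ngrams + 2) at the end.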
+void ForwardCNgramWordRepresentation(real *output, char *word){
+ int length = strlen(word);
+ int start;
+ int cur_len;
+ char *ngram;
+ char tmp[cngram_size+1];
+ tmp[cngram_size] = '\0';
+ int ngrams = 0;
+ for(start = 0; start < length-cngram_size+1; start++){
+ ngram = word + start;
+ strncpy(tmp, ngram, cngram_size);
+ ForwardCNgramWordNgram(output, tmp);
+ ngrams++;
+ }
+ for(cur_len = 0; cur_len < cngram_size-1; cur_len++) tmp[cur_len] = boundToken;
+ strncpy(tmp+1, word, cur_len);
+ ForwardCNgramWordNgram(output, tmp);
+ for(cur_len = 0; cur_len < cngram_size-1; cur_len++) tmp[cngram_size-cur_len-1] = boundToken;
+ cur_len = cngram_size - 1;
+ if(length < cur_len){
+ cur_len = length;
+ }
+ ngram = word + length - cur_len;
+ strncpy(tmp, ngram, cur_len);
+ tmp[cur_len] = boundToken;
+ tmp[cur_len + 1] = '\0';
+ ForwardCNgramWordNgram(output, tmp);
+ for(start = 0; start < layer1_size; start++){
+ output[start] /= ngrams+2;
+ }
+}
+
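+// Applies the accumulated error vector to the embedding of one n-gram
+// (a plain SGD step; the learning rate is already folded into output_err).
+// The 'output' argument is unused here and kept only for symmetry with the
+// forward pass.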
+void BackwardCNgramWordNgram(real *output, char *ngram, real *output_err){
+ long long a;
+ int index = SearchCNgramVocab(ngram);
+ if (index == -1) index = SearchCNgramVocab(unkNgramToken);
+ long long startIndex = layer1_size * index;
+ for (a = 0; a < layer1_size; a++){
+ syn0_cngram[startIndex + a] += output_err[a];
+ }
+}
+
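+// Backpropagates the error into every n-gram the forward pass touched
+// (the same enumeration of interior, prefix and suffix n-grams). Note the
+// 1/(ngrams+2) averaging of the forward pass is not mirrored here, so each
+// n-gram receives the full, unscaled error.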
+void BackwardCNgramWordRepresentation(real *output, char *word, real *output_err){
+ int length = strlen(word);
+ int start;
+ int cur_len;
+ char *ngram;
+ char tmp[cngram_size+1];
+ tmp[cngram_size] = '\0';
+ for(start = 0; start < length-cngram_size+1; start++){
+ ngram = word + start;
+ strncpy(tmp, ngram, cngram_size);
+ BackwardCNgramWordNgram(output, tmp, output_err);
+ }
+ for(cur_len = 0; cur_len < cngram_size-1; cur_len++) tmp[cur_len] = boundToken;
+ strncpy(tmp+1, word, cur_len);
+ BackwardCNgramWordNgram(output, tmp, output_err);
+ for(cur_len = 0; cur_len < cngram_size-1; cur_len++) tmp[cngram_size-cur_len-1] = boundToken;
+ cur_len = cngram_size - 1;
+ if(length < cur_len){
+ cur_len = length;
+ }
+ ngram = word + length - cur_len;
+ strncpy(tmp, ngram, cur_len);
+ tmp[cur_len] = boundToken;
+ tmp[cur_len + 1] = '\0';
+ BackwardCNgramWordNgram(output, tmp, output_err);
+}
+
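+// Adds a character n-gram to the n-gram vocabulary, or increments its count
+// if it is already present, growing the backing array as needed.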
+void AddWordNgramToVocab(char *ngram, int count){
+ int index = SearchCNgramVocab(ngram);
+ if(index != -1){
+ cngram_vocab[index].cn+=count;
+ return;
+ }
+ unsigned int hash, length = strlen(ngram) + 1;
+ if (length > MAX_STRING) length = MAX_STRING;
+ cngram_vocab[cngram_vocab_size].word = (char *)calloc(length, sizeof(char));
+ strcpy(cngram_vocab[cngram_vocab_size].word, ngram);
+ cngram_vocab[cngram_vocab_size].cn = count;
+ cngram_vocab_size++;
+ // Reallocate memory if needed
+ if (cngram_vocab_size + 2 >= cngram_vocab_max_size) {
+ cngram_vocab_max_size += 1000;
+ cngram_vocab = (struct vocab_word *)realloc(cngram_vocab, cngram_vocab_max_size * sizeof(struct vocab_word));
+ }
+ hash = GetWordHash(ngram);
+ while (cngram_vocab_hash[hash] != -1) hash = (hash + 1) % vocab_hash_size;
+ cngram_vocab_hash[hash] = cngram_vocab_size - 1;
+}
+
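+// Registers every n-gram of a word (interior, prefix and suffix, matching
+// the enumeration used by the forward/backward passes), weighted by the
+// word's corpus count.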
+void AddAllWordNgramToVocab(char *word, int count){
+ int length = strlen(word);
+ int start;
+ int cur_len;
+ char *ngram;
+ char tmp[cngram_size+1];
+ tmp[cngram_size] = '\0';
+ for(start = 0; start < length-cngram_size+1; start++){
+ ngram = word + start;
+ strncpy(tmp, ngram, cngram_size);
+ AddWordNgramToVocab(tmp, count);
+ }
+ for(cur_len = 0; cur_len < cngram_size-1; cur_len++) tmp[cur_len] = boundToken;
+ strncpy(tmp+1, word, cur_len);
+ AddWordNgramToVocab(tmp, count);
+ for(cur_len = 0; cur_len < cngram_size-1; cur_len++) tmp[cngram_size-cur_len-1] = boundToken;
+ cur_len = cngram_size - 1;
+ if(length < cur_len){
+ cur_len = length;
+ }
+ ngram = word + length - cur_len;
+ strncpy(tmp, ngram, cur_len);
+ tmp[cur_len] = boundToken;
+ tmp[cur_len + 1] = '\0';
+ AddWordNgramToVocab(tmp, count);
+}
+
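+// Clamps one parameter to [-CAP_VALUE, CAP_VALUE]; used when -cap 1 is set.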
+void capParam(real* array, int index){
+ if(array[index] > CAP_VALUE)
+ array[index] = CAP_VALUE;
+ else if(array[index] < -CAP_VALUE)
+ array[index] = -CAP_VALUE;
+}
+
+real hardTanh(real x){
+ if(x>=1){
+ return 1;
+ }
+ else if(x<=-1){
+ return -1;
+ }
+ else{
+ return x;
+ }
+}
+
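+// Gradient gate for hardTanh: returns 0 when the unit is saturated and the
+// gradient would push it further outside [-1, 1], and 1 otherwise.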
+real dHardTanh(real x, real g){
+ if(x > 1 && g > 0){
+ return 0;
+ }
+ if(x < -1 && g < 0){
+ return 0;
+ }
+ return 1;
+}
+
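+// Builds the table negative samples are drawn from: each word gets a share
+// of the table proportional to its count raised to the 3/4 power. Also
+// precomputes the per-word noise distribution used by NCE.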
+void InitUnigramTable() {
+ int a, i;
+ long long train_words_pow = 0;
+ real d1, power = 0.75;
+ table = (int *)malloc(table_size * sizeof(int));
+ for (a = 0; a < vocab_size; a++) train_words_pow += pow(vocab[a].cn, power);
+ i = 0;
+ d1 = pow(vocab[i].cn, power) / (real)train_words_pow;
+ for (a = 0; a < table_size; a++) {
+ table[a] = i;
+ if (a / (real)table_size > d1) {
+ i++;
+ d1 += pow(vocab[i].cn, power) / (real)train_words_pow;
+ }
+ if (i >= vocab_size) i = vocab_size - 1;
+ }
+
+ noise_distribution = (real *)calloc(vocab_size, sizeof(real));
+ for (a = 0; a < vocab_size; a++) noise_distribution[a] = pow(vocab[a].cn, power)/(real)train_words_pow;
+}
+
+// Reads a single word from a file, assuming space + tab + EOL to be word boundaries
+void ReadWord(char *word, FILE *fin) {
+ int a = 0, ch;
+ while (!feof(fin)) {
+ ch = fgetc(fin);
+ if (ch == 13) continue;
+ if ((ch == ' ') || (ch == '\t') || (ch == '\n')) {
+ if (a > 0) {
+ if (ch == '\n') ungetc(ch, fin);
+ break;
+ }
+ if (ch == '\n') {
+ strcpy(word, (char *)"</s>");
+ return;
+ } else continue;
+ }
+ word[a] = ch;
+ a++;
+ if (a >= MAX_STRING - 1) a--; // Truncate overly long words
+ }
+ word[a] = 0;
+}
+
+// Returns position of a word in the vocabulary; if the word is not found, returns -1
+int SearchVocab(char *word) {
+ unsigned int hash = GetWordHash(word);
+ while (1) {
+ if (vocab_hash[hash] == -1) return -1;
+ if (!strcmp(word, vocab[vocab_hash[hash]].word)) return vocab_hash[hash];
+ hash = (hash + 1) % vocab_hash_size;
+ }
+ return -1;
+}
+
+// Reads a word and returns its index in the vocabulary
+int ReadWordIndex(FILE *fin) {
+ char word[MAX_STRING];
+ ReadWord(word, fin);
+ if (feof(fin)) return -1;
+ return SearchVocab(word);
+}
+
+// Adds a word to the vocabulary
+int AddWordToVocab(char *word) {
+ unsigned int hash, length = strlen(word) + 1;
+ if (length > MAX_STRING) length = MAX_STRING;
+ vocab[vocab_size].word = (char *)calloc(length, sizeof(char));
+ strcpy(vocab[vocab_size].word, word);
+ vocab[vocab_size].cn = 0;
+ vocab_size++;
+ // Reallocate memory if needed
+ if (vocab_size + 2 >= vocab_max_size) {
+ vocab_max_size += 1000;
+ vocab = (struct vocab_word *)realloc(vocab, vocab_max_size * sizeof(struct vocab_word));
+ }
+ hash = GetWordHash(word);
+ while (vocab_hash[hash] != -1) hash = (hash + 1) % vocab_hash_size;
+ vocab_hash[hash] = vocab_size - 1;
+ return vocab_size - 1;
+}
+
+// Used later for sorting by word counts
+int VocabCompare(const void *a, const void *b) {
+ return ((struct vocab_word *)b)->cn - ((struct vocab_word *)a)->cn;
+}
+
+// Sorts the vocabulary by frequency using word counts
+void SortVocab() {
+ int a, size;
+ unsigned int hash;
+ // Sort the vocabulary and keep </s> at the first position
+ qsort(&vocab[1], vocab_size - 1, sizeof(struct vocab_word), VocabCompare);
+ for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1;
+ size = vocab_size;
+ train_words = 0;
+ for (a = 0; a < size; a++) {
+ // Words occurring less than min_count times will be discarded from the vocab
+ if ((vocab[a].cn < min_count) && (a != 0)) {
+ vocab_size--;
+ free(vocab[a].word);
+ } else {
+ // Hash will be re-computed, as after the sorting it is not actual
+ hash=GetWordHash(vocab[a].word);
+ while (vocab_hash[hash] != -1) hash = (hash + 1) % vocab_hash_size;
+ vocab_hash[hash] = a;
+ train_words += vocab[a].cn;
+ }
+ }
+ vocab = (struct vocab_word *)realloc(vocab, (vocab_size + 1) * sizeof(struct vocab_word));
+ // Allocate memory for the binary tree construction
+ for (a = 0; a < vocab_size; a++) {
+ vocab[a].code = (char *)calloc(MAX_CODE_LENGTH, sizeof(char));
+ vocab[a].point = (int *)calloc(MAX_CODE_LENGTH, sizeof(int));
+ }
+}
+
+// Reduces the vocabulary by removing infrequent tokens
+void ReduceVocab() {
+ int a, b = 0;
+ unsigned int hash;
+ for (a = 0; a < vocab_size; a++) if (vocab[a].cn > min_reduce) {
+ vocab[b].cn = vocab[a].cn;
+ vocab[b].word = vocab[a].word;
+ b++;
+ } else free(vocab[a].word);
+ vocab_size = b;
+ for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1;
+ for (a = 0; a < vocab_size; a++) {
+ // Hash will be re-computed, as it is not actual
+ hash = GetWordHash(vocab[a].word);
+ while (vocab_hash[hash] != -1) hash = (hash + 1) % vocab_hash_size;
+ vocab_hash[hash] = a;
+ }
+ fflush(stdout);
+ min_reduce++;
+}
+
+// Create binary Huffman tree using the word counts
+// Frequent words will have short unique binary codes
+void CreateBinaryTree() {
+ long long a, b, i, min1i, min2i, pos1, pos2, point[MAX_CODE_LENGTH];
+ char code[MAX_CODE_LENGTH];
+ long long *count = (long long *)calloc(vocab_size * 2 + 1, sizeof(long long));
+ long long *binary = (long long *)calloc(vocab_size * 2 + 1, sizeof(long long));
+ long long *parent_node = (long long *)calloc(vocab_size * 2 + 1, sizeof(long long));
+ for (a = 0; a < vocab_size; a++) count[a] = vocab[a].cn;
+ for (a = vocab_size; a < vocab_size * 2; a++) count[a] = 1e15;
+ pos1 = vocab_size - 1;
+ pos2 = vocab_size;
+ // Following algorithm constructs the Huffman tree by adding one node at a time
+ for (a = 0; a < vocab_size - 1; a++) {
+ // First, find two smallest nodes 'min1, min2'
+ if (pos1 >= 0) {
+ if (count[pos1] < count[pos2]) {
+ min1i = pos1;
+ pos1--;
+ } else {
+ min1i = pos2;
+ pos2++;
+ }
+ } else {
+ min1i = pos2;
+ pos2++;
+ }
+ if (pos1 >= 0) {
+ if (count[pos1] < count[pos2]) {
+ min2i = pos1;
+ pos1--;
+ } else {
+ min2i = pos2;
+ pos2++;
+ }
+ } else {
+ min2i = pos2;
+ pos2++;
+ }
+ count[vocab_size + a] = count[min1i] + count[min2i];
+ parent_node[min1i] = vocab_size + a;
+ parent_node[min2i] = vocab_size + a;
+ binary[min2i] = 1;
+ }
+ // Now assign binary code to each vocabulary word
+ for (a = 0; a < vocab_size; a++) {
+ b = a;
+ i = 0;
+ while (1) {
+ code[i] = binary[b];
+ point[i] = b;
+ i++;
+ b = parent_node[b];
+ if (b == vocab_size * 2 - 2) break;
+ }
+ vocab[a].codelen = i;
+ vocab[a].point[0] = vocab_size - 2;
+ for (b = 0; b < i; b++) {
+ vocab[a].code[i - b - 1] = code[b];
+ vocab[a].point[i - b] = point[b] - vocab_size;
+ }
+ }
+ free(count);
+ free(binary);
+ free(parent_node);
+}
+
+void LearnVocabFromTrainFile() {
+ char word[MAX_STRING];
+ FILE *fin;
+ long long a, i;
+ for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1;
+ for (a = 0; a < vocab_hash_size; a++) cngram_vocab_hash[a] = -1;
+ fin = fopen(train_file, "rb");
+ if (fin == NULL) {
+ printf("ERROR: training data file not found!\n");
+ exit(1);
+ }
+ vocab_size = 0;
+ AddWordToVocab((char *)"</s>");
+ AddWordNgramToVocab(unkNgramToken,1000000);
+ while (1) {
+ ReadWord(word, fin);
+ if (feof(fin)) break;
+ train_words++;
+ if ((debug_mode > 1) && (train_words % 100000 == 0)) {
+ printf("%lldK%c", train_words / 1000, 13);
+ fflush(stdout);
+ }
+ i = SearchVocab(word);
+ if (i == -1) {
+ a = AddWordToVocab(word);
+ vocab[a].cn = 1;
+ } else vocab[i].cn++;
+ if (vocab_size > vocab_hash_size * 0.7) ReduceVocab();
+ }
+ SortVocab();
+ for (a = 0; a < vocab_size; a++){
+ AddAllWordNgramToVocab(vocab[a].word, vocab[a].cn);
+ }
+ if (debug_mode > 0) {
+ printf("Vocab size: %lld\n", vocab_size);
+ printf("Ngrams size: %lld\n", cngram_vocab_size);
+ printf("Words in train file: %lld\n", train_words);
+ }
+ file_size = ftell(fin);
+ fclose(fin);
+}
+
+void SaveVocab() {
+ long long i;
+ FILE *fo = fopen(save_vocab_file, "wb");
+ for (i = 0; i < vocab_size; i++) fprintf(fo, "%s %lld\n", vocab[i].word, vocab[i].cn);
+ fclose(fo);
+}
+
+void ReadVocab() {
+ long long a, i = 0;
+ char c;
+ char word[MAX_STRING];
+ FILE *fin = fopen(read_vocab_file, "rb");
+ if (fin == NULL) {
+ printf("Vocabulary file not found\n");
+ exit(1);
+ }
+ for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1;
+ vocab_size = 0;
+ while (1) {
+ ReadWord(word, fin);
+ if (feof(fin)) break;
+ a = AddWordToVocab(word);
+ fscanf(fin, "%lld%c", &vocab[a].cn, &c);
+ i++;
+ }
+ SortVocab();
+ // The n-gram vocabulary is not stored in the vocab file, so rebuild it
+ // from the words just read (mirroring LearnVocabFromTrainFile).
+ for (a = 0; a < vocab_hash_size; a++) cngram_vocab_hash[a] = -1;
+ AddWordNgramToVocab(unkNgramToken, 1000000);
+ for (a = 0; a < vocab_size; a++) AddAllWordNgramToVocab(vocab[a].word, vocab[a].cn);
+ if (debug_mode > 0) {
+ printf("Vocab size: %lld\n", vocab_size);
+ printf("Words in train file: %lld\n", train_words);
+ }
+ fin = fopen(train_file, "rb");
+ if (fin == NULL) {
+ printf("ERROR: training data file not found!\n");
+ exit(1);
+ }
+ fseek(fin, 0, SEEK_END);
+ file_size = ftell(fin);
+ fclose(fin);
+}
+
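+// Reads the -negative-classes file and builds one sampling table per class,
+// so negatives for a word can be drawn from its own class. Each record is
+// read as three whitespace-separated tokens: a class label, a word, and a
+// third token that is read and discarded (presumably a count).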
+void InitClassUnigramTable() {
+ long long a,c;
+ printf("loading class unigrams \n");
+ FILE *fin = fopen(negative_classes_file, "rb");
+ if (fin == NULL) {
+ printf("ERROR: class file not found!\n");
+ exit(1);
+ }
+ word_to_group = (int *)malloc(vocab_size * sizeof(int));
+ for(a = 0; a < vocab_size; a++) word_to_group[a] = -1;
+ char class[MAX_STRING];
+ char prev_class[MAX_STRING];
+ prev_class[0] = 0;
+ char word[MAX_STRING];
+ class_number = -1;
+ while (1) {
+ if (feof(fin)) break;
+ ReadWord(class, fin);
+ ReadWord(word, fin);
+ int word_index = SearchVocab(word);
+ if (word_index != -1){
+ if(strcmp(class, prev_class) != 0){
+ class_number++;
+ strcpy(prev_class, class);
+ }
+ word_to_group[word_index] = class_number;
+ }
+ ReadWord(word, fin);
+ }
+ class_number++;
+ fclose(fin);
+
+ group_to_table = (int *)malloc(table_size * class_number * sizeof(int));
+ long long train_words_pow = 0;
+ real d1, power = 0.75;
+
+ for(c = 0; c < class_number; c++){
+ long long offset = c * table_size;
+ train_words_pow = 0;
+ for (a = 0; a < vocab_size; a++) if(word_to_group[a] == c) train_words_pow += pow(vocab[a].cn, power);
+ int i = 0;
+ while(i < vocab_size && word_to_group[i] != c) i++; // bounds check before the array read
+ d1 = pow(vocab[i].cn, power) / (real)train_words_pow;
+ for (a = 0; a < table_size; a++) {
+ //printf("index %lld , word %d\n", a, i);
+ group_to_table[offset + a] = i;
+ if (a / (real)table_size > d1) {
+ i++;
+ while(i < vocab_size && word_to_group[i] != c) i++;
+ d1 += pow(vocab[i].cn, power) / (real)train_words_pow;
+ }
+ if (i >= vocab_size) { i = vocab_size - 1; while (i >= 0 && word_to_group[i] != c) i--; }
+ }
+ }
+}
+
+void InitNet() {
+ long long a, b;
+ unsigned long long next_random = 1;
+ window_layer_size = layer1_size*window*2;
+ a = posix_memalign((void **)&syn0, 128, (long long)vocab_size * layer1_size * sizeof(real));
+ if (syn0 == NULL) {printf("Memory allocation failed\n"); exit(1);}
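+ // Embedding table for the character n-grams: one row of layer1_size reals
+ // per entry in cngram_vocab.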
+ a = posix_memalign((void **)&syn0_cngram, 128, (long long)cngram_vocab_size * layer1_size * sizeof(real));
+ if (syn0_cngram == NULL) {printf("Memory allocation failed\n"); exit(1);}
+
+ if (hs) {
+ a = posix_memalign((void **)&syn1, 128, (long long)vocab_size * layer1_size * sizeof(real));
+ if (syn1 == NULL) {printf("Memory allocation failed\n"); exit(1);}
+ a = posix_memalign((void **)&syn1_window, 128, (long long)vocab_size * window_layer_size * sizeof(real));
+ if (syn1_window == NULL) {printf("Memory allocation failed\n"); exit(1);}
+ a = posix_memalign((void **)&syn_hidden_word, 128, (long long)vocab_size * window_hidden_size * sizeof(real));
+ if (syn_hidden_word == NULL) {printf("Memory allocation failed\n"); exit(1);}
+
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < layer1_size; b++)
+ syn1[a * layer1_size + b] = 0;
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < window_layer_size; b++)
+ syn1_window[a * window_layer_size + b] = 0;
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < window_hidden_size; b++)
+ syn_hidden_word[a * window_hidden_size + b] = 0;
+ }
+ if (negative>0) {
+ a = posix_memalign((void **)&syn1neg, 128, (long long)vocab_size * layer1_size * sizeof(real));
+ if (syn1neg == NULL) {printf("Memory allocation failed\n"); exit(1);}
+ a = posix_memalign((void **)&syn1neg_window, 128, (long long)vocab_size * window_layer_size * sizeof(real));
+ if (syn1neg_window == NULL) {printf("Memory allocation failed\n"); exit(1);}
+ a = posix_memalign((void **)&syn_hidden_word_neg, 128, (long long)vocab_size * window_hidden_size * sizeof(real));
+ if (syn_hidden_word_neg == NULL) {printf("Memory allocation failed\n"); exit(1);}
+
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < layer1_size; b++)
+ syn1neg[a * layer1_size + b] = 0;
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < window_layer_size; b++)
+ syn1neg_window[a * window_layer_size + b] = 0;
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < window_hidden_size; b++)
+ syn_hidden_word_neg[a * window_hidden_size + b] = 0;
+ }
+ if (nce>0) {
+ a = posix_memalign((void **)&syn1nce, 128, (long long)vocab_size * layer1_size * sizeof(real));
+ if (syn1nce == NULL) {printf("Memory allocation failed\n"); exit(1);}
+ a = posix_memalign((void **)&syn1nce_window, 128, (long long)vocab_size * window_layer_size * sizeof(real));
+ if (syn1nce_window == NULL) {printf("Memory allocation failed\n"); exit(1);}
+ a = posix_memalign((void **)&syn_hidden_word_nce, 128, (long long)vocab_size * window_hidden_size * sizeof(real));
+ if (syn_hidden_word_nce == NULL) {printf("Memory allocation failed\n"); exit(1);}
+
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < layer1_size; b++)
+ syn1nce[a * layer1_size + b] = 0;
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < window_layer_size; b++)
+ syn1nce_window[a * window_layer_size + b] = 0;
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < window_hidden_size; b++)
+ syn_hidden_word_nce[a * window_hidden_size + b] = 0;
+ }
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < layer1_size; b++) {
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ syn0[a * layer1_size + b] = (((next_random & 0xFFFF) / (real)65536) - 0.5) / layer1_size;
+ }
+
+ for (a = 0; a < cngram_vocab_size; a++) for (b = 0; b < layer1_size; b++){
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ syn0_cngram[a * layer1_size + b] = (((next_random & 0xFFFF) / (real)65536) - 0.5) / layer1_size;
+ }
+
+ a = posix_memalign((void **)&syn_window_hidden, 128, window_hidden_size * window_layer_size * sizeof(real));
+ if (syn_window_hidden == NULL) {printf("Memory allocation failed\n"); exit(1);}
+ for (a = 0; a < window_hidden_size * window_layer_size; a++){
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ syn_window_hidden[a] = (((next_random & 0xFFFF) / (real)65536) - 0.5) / (window_hidden_size*window_layer_size);
+ }
+
+ CreateBinaryTree();
+}
+
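+// One training thread. Each thread seeks to its own slice of the training
+// file and runs 'iter' passes over it, training the architecture selected
+// by -type: 0 cbow, 1 skip-gram, 2 cwindow, 3 structured skip-gram,
+// 4 senna-style. For types 0-3 the context word vectors are composed from
+// character n-grams on the fly; only type 4 reads syn0 directly.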
+void *TrainModelThread(void *id) {
+ long long a, b, d, cw, word, last_word, sentence_length = 0, sentence_position = 0;
+ long long word_count = 0, last_word_count = 0, sen[MAX_SENTENCE_LENGTH + 1];
+ long long l1, l2, c, target, label, local_iter = iter;
+ unsigned long long next_random = (long long)id;
+ real f, g;
+ clock_t now;
+ int input_len_1 = layer1_size;
+ int window_offset = -1;
+ if(type == 2 || type == 4){
+ input_len_1=window_layer_size;
+ }
+ real *neu1 = (real *)calloc(input_len_1, sizeof(real));
+ real *neu1e = (real *)calloc(input_len_1, sizeof(real));
+
+ int input_len_2 = 0;
+ if(type == 4){
+ input_len_2 = window_hidden_size;
+ }
+ real *neu2 = (real *)calloc(input_len_2, sizeof(real));
+ real *neu2e = (real *)calloc(input_len_2, sizeof(real));
+
+ FILE *fi = fopen(train_file, "rb");
+ fseek(fi, file_size / (long long)num_threads * (long long)id, SEEK_SET);
+ while (1) {
+ if (word_count - last_word_count > 10000) {
+ word_count_actual += word_count - last_word_count;
+ last_word_count = word_count;
+ if ((debug_mode > 1)) {
+ now=clock();
+ printf("%cAlpha: %f Progress: %.2f%% Words/thread/sec: %.2fk ", 13, alpha,
+ word_count_actual / (real)(iter * train_words + 1) * 100,
+ word_count_actual / ((real)(now - start + 1) / (real)CLOCKS_PER_SEC * 1000));
+ fflush(stdout);
+ }
+ alpha = starting_alpha * (1 - word_count_actual / (real)(iter * train_words + 1));
+ if (alpha < starting_alpha * 0.0001) alpha = starting_alpha * 0.0001;
+ }
+ if (sentence_length == 0) {
+ while (1) {
+ word = ReadWordIndex(fi);
+ if (feof(fi)) break;
+ if (word == -1) continue;
+ word_count++;
+ if (word == 0) break;
+ // The subsampling randomly discards frequent words while keeping the ranking same
+ if (sample > 0) {
+ real ran = (sqrt(vocab[word].cn / (sample * train_words)) + 1) * (sample * train_words) / vocab[word].cn;
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ if (ran < (next_random & 0xFFFF) / (real)65536) continue;
+ }
+ sen[sentence_length] = word;
+ sentence_length++;
+ if (sentence_length >= MAX_SENTENCE_LENGTH) break;
+ }
+ sentence_position = 0;
+ }
+ if (feof(fi) || (word_count > train_words / num_threads)) {
+ word_count_actual += word_count - last_word_count;
+ local_iter--;
+ if (local_iter == 0) break;
+ word_count = 0;
+ last_word_count = 0;
+ sentence_length = 0;
+ fseek(fi, file_size / (long long)num_threads * (long long)id, SEEK_SET);
+ continue;
+ }
+ word = sen[sentence_position];
+ if (word == -1) continue;
+ for (c = 0; c < input_len_1; c++) neu1[c] = 0;
+ for (c = 0; c < input_len_1; c++) neu1e[c] = 0;
+ for (c = 0; c < input_len_2; c++) neu2[c] = 0;
+ for (c = 0; c < input_len_2; c++) neu2e[c] = 0;
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ b = next_random % window;
+ if (type == 0) { //train the cbow architecture
+ // in -> hidden
+ cw = 0;
+ for (a = b; a < window * 2 + 1 - b; a++) if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0) continue;
+ if (c >= sentence_length) continue;
+ last_word = sen[c];
+ if (last_word == -1) continue;
+ ForwardCNgramWordRepresentation(neu1, vocab[last_word].word);
+ cw++;
+ }
+ if (cw) {
+ for (c = 0; c < layer1_size; c++) neu1[c] /= cw;
+ if (hs) for (d = 0; d < vocab[word].codelen; d++) {
+ f = 0;
+ l2 = vocab[word].point[d] * layer1_size;
+ // Propagate hidden -> output
+ for (c = 0; c < layer1_size; c++) f += neu1[c] * syn1[c + l2];
+ if (f <= -MAX_EXP) continue;
+ else if (f >= MAX_EXP) continue;
+ else f = expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
+ // 'g' is the gradient multiplied by the learning rate
+ g = (1 - vocab[word].code[d] - f) * alpha;
+ // Propagate errors output -> hidden
+ for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1[c + l2];
+ // Learn weights hidden -> output
+ for (c = 0; c < layer1_size; c++) syn1[c + l2] += g * neu1[c];
+ if(cap == 1) for (c = 0; c < layer1_size; c++) capParam(syn1, c + l2);
+ }
+ // NEGATIVE SAMPLING
+ if (negative > 0) for (d = 0; d < negative + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ if(word_to_group != NULL && word_to_group[word] != -1){
+ target = word;
+ while(target == word) {
+ target = group_to_table[word_to_group[word]*table_size + (next_random >> 16) % table_size];
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ }
+ else{
+ target = table[(next_random >> 16) % table_size];
+ }
+ if (target == 0) target = next_random % (vocab_size - 1) + 1;
+ if (target == word) continue;
+ label = 0;
+ }
+ l2 = target * layer1_size;
+ f = 0;
+ for (c = 0; c < layer1_size; c++) f += neu1[c] * syn1neg[c + l2];
+ if (f > MAX_EXP) g = (label - 1) * alpha;
+ else if (f < -MAX_EXP) g = (label - 0) * alpha;
+ else g = (label - expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]) * alpha;
+ for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1neg[c + l2];
+ for (c = 0; c < layer1_size; c++) syn1neg[c + l2] += g * neu1[c];
+ if (cap == 1) for (c = 0; c < layer1_size; c++) capParam(syn1neg, c + l2);
+ }
+ // Noise Contrastive Estimation
+ if (nce > 0) for (d = 0; d < nce + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ if(word_to_group != NULL && word_to_group[word] != -1){
+ target = word;
+ while(target == word) {
+ target = group_to_table[word_to_group[word]*table_size + (next_random >> 16) % table_size];
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ }
+ }
+ else{
+ target = table[(next_random >> 16) % table_size];
+ }
+ if (target == 0) target = next_random % (vocab_size - 1) + 1;
+ if (target == word) continue;
+ label = 0;
+ }
+ l2 = target * layer1_size;
+ f = 0;
+
+ for (c = 0; c < layer1_size; c++) f += neu1[c] * syn1nce[c + l2];
+ if (f > MAX_EXP) g = (label - 1) * alpha;
+ else if (f < -MAX_EXP) g = (label - 0) * alpha;
+ else {
+ f = exp(f);
+ g = (label - f/(noise_distribution[target]*nce + f)) * alpha;
+ }
+ for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1nce[c + l2];
+ for (c = 0; c < layer1_size; c++) syn1nce[c + l2] += g * neu1[c];
+ if(cap == 1) for (c = 0; c < layer1_size; c++) capParam(syn1nce,c + l2);
+ }
+ // hidden -> in
+ for (a = b; a < window * 2 + 1 - b; a++) if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0) continue;
+ if (c >= sentence_length) continue;
+ last_word = sen[c];
+ if (last_word == -1) continue;
+ BackwardCNgramWordRepresentation(neu1, vocab[last_word].word, neu1e);
+ }
+ }
+ } else if(type==1) { //train skip-gram
+ for (a = b; a < window * 2 + 1 - b; a++) if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0) continue;
+ if (c >= sentence_length) continue;
+ last_word = sen[c];
+ if (last_word == -1) continue;
+ l1 = last_word * layer1_size;
+ for (c = 0; c < layer1_size; c++) neu1[c] = 0; // compose each context word from scratch
+ ForwardCNgramWordRepresentation(neu1, vocab[last_word].word);
+ for (c = 0; c < layer1_size; c++) neu1e[c] = 0;
+ // HIERARCHICAL SOFTMAX
+ if (hs) for (d = 0; d < vocab[word].codelen; d++) {
+ f = 0;
+ l2 = vocab[word].point[d] * layer1_size;
+ // Propagate hidden -> output
+ for (c = 0; c < layer1_size; c++) f += neu1[c] * syn1[c + l2];
+ if (f <= -MAX_EXP) continue;
+ else if (f >= MAX_EXP) continue;
+ else f = expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
+ // 'g' is the gradient multiplied by the learning rate
+ g = (1 - vocab[word].code[d] - f) * alpha;
+ // Propagate errors output -> hidden
+ for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1[c + l2];
+ // Learn weights hidden -> output
+ for (c = 0; c < layer1_size; c++) syn1[c + l2] += g * neu1[c];
+ if (cap == 1) for (c = 0; c < layer1_size; c++) capParam(syn1, c + l2);
+ }
+ // NEGATIVE SAMPLING
+ if (negative > 0) for (d = 0; d < negative + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ if(word_to_group != NULL && word_to_group[word] != -1){
+ target = word;
+ while(target == word) {
+ target = group_to_table[word_to_group[word]*table_size + (next_random >> 16) % table_size];
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ }
+ else{
+ target = table[(next_random >> 16) % table_size];
+ }
+ if (target == 0) target = next_random % (vocab_size - 1) + 1;
+ if (target == word) continue;
+ label = 0;
+ }
+ l2 = target * layer1_size;
+ f = 0;
+ for (c = 0; c < layer1_size; c++) f += neu1[c] * syn1neg[c + l2];
+ if (f > MAX_EXP) g = (label - 1) * alpha;
+ else if (f < -MAX_EXP) g = (label - 0) * alpha;
+ else g = (label - expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]) * alpha;
+ for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1neg[c + l2];
+ for (c = 0; c < layer1_size; c++) syn1neg[c + l2] += g * neu1[c];
+ if (cap == 1) for (c = 0; c < layer1_size; c++) capParam(syn1neg, c + l2);
+ }
+ //Noise Contrastive Estimation
+ if (nce > 0) for (d = 0; d < nce + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ if(word_to_group != NULL && word_to_group[word] != -1){
+ target = word;
+ while(target == word) {
+ target = group_to_table[word_to_group[word]*table_size + (next_random >> 16) % table_size];
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ }
+ else{
+ target = table[(next_random >> 16) % table_size];
+ }
+ if (target == 0) target = next_random % (vocab_size - 1) + 1;
+ if (target == word) continue;
+ label = 0;
+ }
+ l2 = target * layer1_size;
+ f = 0;
+ for (c = 0; c < layer1_size; c++) f += neu1[c] * syn1nce[c + l2];
+ if (f > MAX_EXP) g = (label - 1) * alpha;
+ else if (f < -MAX_EXP) g = (label - 0) * alpha;
+ else {
+ f = exp(f);
+ g = (label - f/(noise_distribution[target]*nce + f)) * alpha;
+ }
+ for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1nce[c + l2];
+ for (c = 0; c < layer1_size; c++) syn1nce[c + l2] += g * neu1[c];
+ if (cap == 1) for (c = 0; c < layer1_size; c++) capParam(syn1nce, c + l2);
+ }
+ // Learn weights input -> hidden
+ BackwardCNgramWordRepresentation(neu1, vocab[last_word].word, neu1e);
+ }
+ }
+ else if(type == 2){ //train the cwindow architecture
+ // in -> hidden
+ cw = 0;
+ for (a = 0; a < window * 2 + 1; a++) if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0) continue;
+ if (c >= sentence_length) continue;
+ last_word = sen[c];
+ if (last_word == -1) continue;
+ window_offset = a*layer1_size;
+ if (a > window) window_offset-=layer1_size;
+ ForwardCNgramWordRepresentation(&neu1[window_offset], vocab[last_word].word);
+ cw++;
+ }
+ if (cw) {
+ if (hs) for (d = 0; d < vocab[word].codelen; d++) {
+ f = 0;
+ l2 = vocab[word].point[d] * window_layer_size;
+ // Propagate hidden -> output
+ for (c = 0; c < window_layer_size; c++) f += neu1[c] * syn1_window[c + l2];
+ if (f <= -MAX_EXP) continue;
+ else if (f >= MAX_EXP) continue;
+ else f = expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
+ // 'g' is the gradient multiplied by the learning rate
+ g = (1 - vocab[word].code[d] - f) * alpha;
+ // Propagate errors output -> hidden
+ for (c = 0; c < window_layer_size; c++) neu1e[c] += g * syn1_window[c + l2];
+ // Learn weights hidden -> output
+ for (c = 0; c < window_layer_size; c++) syn1_window[c + l2] += g * neu1[c];
+ if (cap == 1) for (c = 0; c < window_layer_size; c++) capParam(syn1_window, c + l2);
+ }
+ // NEGATIVE SAMPLING
+ if (negative > 0) for (d = 0; d < negative + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ if(word_to_group != NULL && word_to_group[word] != -1){
+ target = word;
+ while(target == word) {
+ target = group_to_table[word_to_group[word]*table_size + (next_random >> 16) % table_size];
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ }
+ else{
+ target = table[(next_random >> 16) % table_size];
+ }
+ if (target == 0) target = next_random % (vocab_size - 1) + 1;
+ if (target == word) continue;
+ label = 0;
+ }
+ l2 = target * window_layer_size;
+ f = 0;
+ for (c = 0; c < window_layer_size; c++) f += neu1[c] * syn1neg_window[c + l2];
+ if (f > MAX_EXP) g = (label - 1) * alpha;
+ else if (f < -MAX_EXP) g = (label - 0) * alpha;
+ else g = (label - expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]) * alpha;
+ for (c = 0; c < window_layer_size; c++) neu1e[c] += g * syn1neg_window[c + l2];
+ for (c = 0; c < window_layer_size; c++) syn1neg_window[c + l2] += g * neu1[c];
+ if(cap == 1) for (c = 0; c < window_layer_size; c++) capParam(syn1neg_window, c + l2);
+ }
+ // Noise Contrastive Estimation
+ if (nce > 0) for (d = 0; d < nce + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ if(word_to_group != NULL && word_to_group[word] != -1){
+ target = word;
+ while(target == word) {
+ target = group_to_table[word_to_group[word]*table_size + (next_random >> 16) % table_size];
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ }
+ else{
+ target = table[(next_random >> 16) % table_size];
+ }
+ if (target == 0) target = next_random % (vocab_size - 1) + 1;
+ if (target == word) continue;
+ label = 0;
+ }
+ l2 = target * window_layer_size;
+ f = 0;
+ for (c = 0; c < window_layer_size; c++) f += neu1[c] * syn1nce_window[c + l2];
+ if (f > MAX_EXP) g = (label - 1) * alpha;
+ else if (f < -MAX_EXP) g = (label - 0) * alpha;
+ else {
+ f = exp(f);
+ g = (label - f/(noise_distribution[target]*nce + f)) * alpha;
+ }
+ for (c = 0; c < window_layer_size; c++) neu1e[c] += g * syn1nce_window[c + l2];
+ for (c = 0; c < window_layer_size; c++) syn1nce_window[c + l2] += g * neu1[c];
+ if(cap == 1) for (c = 0; c < window_layer_size; c++) capParam(syn1nce_window, c + l2);
+ }
+ // hidden -> in
+ for (a = 0; a < window * 2 + 1; a++) if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0) continue;
+ if (c >= sentence_length) continue;
+ last_word = sen[c];
+ if (last_word == -1) continue;
+ window_offset = a * layer1_size;
+ if(a > window) window_offset -= layer1_size;
+ BackwardCNgramWordRepresentation(&neu1[window_offset], vocab[last_word].word, &neu1e[window_offset]);
+ }
+ }
+ }
+ else if (type == 3){ //train structured skip-gram
+ for (a = 0; a < window * 2 + 1; a++) if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0) continue;
+ if (c >= sentence_length) continue;
+ last_word = sen[c];
+ if (last_word == -1) continue;
+ l1 = last_word * layer1_size;
+ window_offset = a * layer1_size;
+ if(a > window) window_offset -= layer1_size;
+ for (c = 0; c < layer1_size; c++) neu1[c] = 0; // compose each context word from scratch
+ ForwardCNgramWordRepresentation(neu1, vocab[last_word].word);
+ for (c = 0; c < layer1_size; c++) neu1e[c] = 0;
+ // HIERARCHICAL SOFTMAX
+ if (hs) for (d = 0; d < vocab[word].codelen; d++) {
+ f = 0;
+ l2 = vocab[word].point[d] * window_layer_size;
+ // Propagate hidden -> output
+ for (c = 0; c < layer1_size; c++) f += neu1[c] * syn1_window[c + l2 + window_offset];
+ if (f <= -MAX_EXP) continue;
+ else if (f >= MAX_EXP) continue;
+ else f = expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
+ // 'g' is the gradient multiplied by the learning rate
+ g = (1 - vocab[word].code[d] - f) * alpha;
+ // Propagate errors output -> hidden
+ for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1_window[c + l2 + window_offset];
+ // Learn weights hidden -> output
+ for (c = 0; c < layer1_size; c++) syn1_window[c + l2 + window_offset] += g * neu1[c];
+ if(cap == 1) for (c = 0; c < layer1_size; c++) capParam(syn1_window, c + l2 + window_offset);
+ }
+ // NEGATIVE SAMPLING
+ if (negative > 0) for (d = 0; d < negative + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ if(word_to_group != NULL && word_to_group[word] != -1){
+ target = word;
+ while(target == word) {
+ target = group_to_table[word_to_group[word]*table_size + (next_random >> 16) % table_size];
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ }
+ else{
+ target = table[(next_random >> 16) % table_size];
+ }
+ if (target == 0) target = next_random % (vocab_size - 1) + 1;
+ if (target == word) continue;
+ label = 0;
+ }
+ l2 = target * window_layer_size;
+ f = 0;
+ for (c = 0; c < layer1_size; c++) f += neu1[c] * syn1neg_window[c + l2 + window_offset];
+ if (f > MAX_EXP) g = (label - 1) * alpha;
+ else if (f < -MAX_EXP) g = (label - 0) * alpha;
+ else g = (label - expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]) * alpha;
+ for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1neg_window[c + l2 + window_offset];
+ for (c = 0; c < layer1_size; c++) syn1neg_window[c + l2 + window_offset] += g * neu1[c];
+ if(cap == 1) for (c = 0; c < layer1_size; c++) capParam(syn1neg_window, c + l2 + window_offset);
+ }
+ // Noise Contrastive Estimation
+ if (nce > 0) for (d = 0; d < nce + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ if(word_to_group != NULL && word_to_group[word] != -1){
+ target = word;
+ while(target == word) {
+ target = group_to_table[word_to_group[word]*table_size + (next_random >> 16) % table_size];
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ }
+ else{
+ target = table[(next_random >> 16) % table_size];
+ }
+ if (target == 0) target = next_random % (vocab_size - 1) + 1;
+ if (target == word) continue;
+ label = 0;
+ }
+ l2 = target * window_layer_size;
+ f = 0;
+ for (c = 0; c < layer1_size; c++) f += neu1[c] * syn1nce_window[c + l2 + window_offset];
+ if (f > MAX_EXP) g = (label - 1) * alpha;
+ else if (f < -MAX_EXP) g = (label - 0) * alpha;
+ else {
+ f = exp(f);
+ g = (label - f/(noise_distribution[target]*nce + f)) * alpha;
+ }
+ for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1nce_window[c + l2 + window_offset];
+ for (c = 0; c < layer1_size; c++) syn1nce_window[c + l2 + window_offset] += g * neu1[c];
+ if (cap == 1) for (c = 0; c < layer1_size; c++) capParam(syn1nce_window, c + l2 + window_offset);
+ }
+ // Learn weights input -> hidden
+ BackwardCNgramWordRepresentation(neu1, vocab[last_word].word, neu1e);
+ }
+ }
+ else if(type == 4){ //training senna
+ // in -> hidden
+ cw = 0;
+ for (a = 0; a < window * 2 + 1; a++) if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0) continue;
+ if (c >= sentence_length) continue;
+ last_word = sen[c];
+ if (last_word == -1) continue;
+ window_offset = a*layer1_size;
+ if (a > window) window_offset-=layer1_size;
+ for (c = 0; c < layer1_size; c++) neu1[c+window_offset] += syn0[c + last_word * layer1_size];
+ cw++;
+ }
+ if (cw) {
+ for (a = 0; a < window_hidden_size; a++){
+ c = a*window_layer_size;
+ for(b = 0; b < window_layer_size; b++){
+ neu2[a] += syn_window_hidden[c + b] * neu1[b];
+ }
+ }
+ if (hs) for (d = 0; d < vocab[word].codelen; d++) {
+ f = 0;
+ l2 = vocab[word].point[d] * window_hidden_size;
+ // Propagate hidden -> output
+ for (c = 0; c < window_hidden_size; c++) f += hardTanh(neu2[c]) * syn_hidden_word[c + l2];
+ if (f <= -MAX_EXP) continue;
+ else if (f >= MAX_EXP) continue;
+ else f = expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
+ // 'g' is the gradient multiplied by the learning rate
+ g = (1 - vocab[word].code[d] - f) * alpha;
+ // Propagate errors output -> hidden
+ for (c = 0; c < window_hidden_size; c++) neu2e[c] += dHardTanh(neu2[c],g) * g * syn_hidden_word[c + l2];
+ // Learn weights hidden -> output
+ for (c = 0; c < window_hidden_size; c++) syn_hidden_word[c + l2] += dHardTanh(neu2[c],g) * g * neu2[c];
+ }
+ // NEGATIVE SAMPLING
+ if (negative > 0) for (d = 0; d < negative + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ if(word_to_group != NULL && word_to_group[word] != -1){
+ target = word;
+ while(target == word) {
+ target = group_to_table[word_to_group[word]*table_size + (next_random >> 16) % table_size];
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ }
+ else{
+ target = table[(next_random >> 16) % table_size];
+ }
+ if (target == 0) target = next_random % (vocab_size - 1) + 1;
+ if (target == word) continue;
+ label = 0;
+ }
+ l2 = target * window_hidden_size;
+ f = 0;
+ for (c = 0; c < window_hidden_size; c++) f += hardTanh(neu2[c]) * syn_hidden_word_neg[c + l2];
+ if (f > MAX_EXP) g = (label - 1) * alpha / negative;
+ else if (f < -MAX_EXP) g = (label - 0) * alpha / negative;
+ else g = (label - expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]) * alpha / negative;
+ for (c = 0; c < window_hidden_size; c++) neu2e[c] += dHardTanh(neu2[c],g) * g * syn_hidden_word_neg[c + l2];
+ for (c = 0; c < window_hidden_size; c++) syn_hidden_word_neg[c + l2] += dHardTanh(neu2[c],g) * g * neu2[c];
+ }
+ for (a = 0; a < window_hidden_size; a++)
+ for(b = 0; b < window_layer_size; b++)
+ neu1e[b] += neu2e[a] * syn_window_hidden[a*window_layer_size + b];
+ for (a = 0; a < window_hidden_size; a++)
+ for(b = 0; b < window_layer_size; b++)
+ syn_window_hidden[a*window_layer_size + b] += neu2e[a] * neu1[b];
+ // hidden -> in
+ for (a = 0; a < window * 2 + 1; a++) if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0) continue;
+ if (c >= sentence_length) continue;
+ last_word = sen[c];
+ if (last_word == -1) continue;
+ window_offset = a * layer1_size;
+ if(a > window) window_offset -= layer1_size;
+ for (c = 0; c < layer1_size; c++) syn0[c + last_word * layer1_size] += neu1e[c + window_offset];
+ }
+ }
+ }
+ else{
+ printf("unknown type %i", type);
+ exit(0);
+ }
+ sentence_position++;
+ if (sentence_position >= sentence_length) {
+ sentence_length = 0;
+ continue;
+ }
+ }
+ fclose(fi);
+ free(neu1);
+ free(neu1e);
+ free(neu2);
+ free(neu2e);
+ pthread_exit(NULL);
+}
+
+void TrainModel() {
+ long a, b;
+ long extra_words;
+ FILE *fo;
+ FILE *fi;
+ pthread_t *pt = (pthread_t *)malloc(num_threads * sizeof(pthread_t));
+ printf("Starting training using file %s\n", train_file);
+ starting_alpha = alpha;
+ if (read_vocab_file[0] != 0) ReadVocab(); else LearnVocabFromTrainFile();
+ if (save_vocab_file[0] != 0) SaveVocab();
+ if (output_file[0] == 0) return;
+ InitNet();
+ if (negative > 0 || nce > 0) InitUnigramTable();
+ if (negative_classes_file[0] != 0) InitClassUnigramTable();
+ start = clock();
+ for (a = 0; a < num_threads; a++) pthread_create(&pt[a], NULL, TrainModelThread, (void *)a);
+ for (a = 0; a < num_threads; a++) pthread_join(pt[a], NULL);
+ fo = fopen(output_file, "wb");
+ if (classes == 0) {
+ // Save the word vectors
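+ // Each vector written below is recomposed from the word's character
+ // n-grams, so the same code can also embed words never seen in training.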
+ real neu1[layer1_size];
+
+ // count extra words
+ extra_words = 0;
+ fi = fopen(extra_vocab_file, "rb");
+ if (fi != NULL) {
+ char word[MAX_STRING];
+ while(1){
+ ReadWord(word, fi);
+ if(feof(fi)) break;
+ extra_words++;
+ ReadWord(word, fi);
+ }
+ fclose(fi);
+ }
+ fprintf(fo, "%lld %lld\n", vocab_size + extra_words, layer1_size);
+ for (a = 0; a < vocab_size; a++) {
+ fprintf(fo, "%s ", vocab[a].word);
+ for (b = 0; b < layer1_size; b++) neu1[b] = 0;
+ ForwardCNgramWordRepresentation(neu1, vocab[a].word);
+
+ if (binary) for (b = 0; b < layer1_size; b++) fwrite(&neu1[b], sizeof(real), 1, fo);
+ else for (b = 0; b < layer1_size; b++) fprintf(fo, "%lf ", neu1[b]);
+ fprintf(fo, "\n");
+ }
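+ // Second pass over the extra-vocab file: write an n-gram-composed vector
+ // for each extra word. ReadWord is called twice per line; the second call
+ // consumes the "</s>" token produced by the newline.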
+ fi = fopen(extra_vocab_file, "rb");
+ if (fi != NULL) {
+ char word[MAX_STRING];
+ while(1){
+ ReadWord(word, fi);
+ if(feof(fi)) break;
+ for (b = 0; b < layer1_size; b++) neu1[b] = 0;
+ fprintf(fo, "%s ", word);
+ ForwardCNgramWordRepresentation(neu1, word);
+ if (binary) for (b = 0; b < layer1_size; b++) fwrite(&neu1[b], sizeof(real), 1, fo);
+ else for (b = 0; b < layer1_size; b++) fprintf(fo, "%lf ", neu1[b]);
+ fprintf(fo, "\n");
+ ReadWord(word, fi);
+ }
+ fclose(fi);
+ }
+ }
+ fclose(fo);
+}
+
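+// Returns the index of a command-line flag in argv, or -1 if it is absent.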
+int ArgPos(char *str, int argc, char **argv) {
+ int a;
+ for (a = 1; a < argc; a++) if (!strcmp(str, argv[a])) {
+ if (a == argc - 1) {
+ printf("Argument missing for %s\n", str);
+ exit(1);
+ }
+ return a;
+ }
+ return -1;
+}
+
+int main(int argc, char **argv) {
+ int i;
+ if (argc == 1) {
+ printf("WORD VECTOR estimation toolkit v 0.1c\n\n");
+ printf("Options:\n");
+ printf("Parameters for training:\n");
+ printf("\t-train <file>\n");
+ printf("\t\tUse text data from <file> to train the model\n");
+ printf("\t-output <file>\n");
+ printf("\t\tUse <file> to save the resulting word vectors / word clusters\n");
+ printf("\t-size <int>\n");
+ printf("\t\tSet size of word vectors; default is 100\n");
+ printf("\t-window <int>\n");
+ printf("\t\tSet max skip length between words; default is 5\n");
+ printf("\t-sample <float>\n");
+ printf("\t\tSet threshold for occurrence of words. Those that appear with higher frequency in the training data\n");
+ printf("\t\twill be randomly down-sampled; default is 1e-3, useful range is (0, 1e-5)\n");
+ printf("\t-hs <int>\n");
+ printf("\t\tUse Hierarchical Softmax; default is 0 (not used)\n");
+ printf("\t-negative <int>\n");
+ printf("\t\tNumber of negative examples; default is 5, common values are 3 - 10 (0 = not used)\n");
+ printf("\t-negative-classes <file>\n");
+ printf("\t\tNegative classes to sample from\n");
+ printf("\t-nce <int>\n");
+ printf("\t\tNumber of negative examples for nce; default is 5, common values are 3 - 10 (0 = not used)\n");
+ printf("\t-threads <int>\n");
+ printf("\t\tUse <int> threads (default 12)\n");
+ printf("\t-iter <int>\n");
+ printf("\t\tRun more training iterations (default 5)\n");
+ printf("\t-cngram-size <int>\n");
+ printf("\t\tUse <int> size of the character ngrams (default 4)\n");
+ printf("\t-extra_vocab_file <file>\n");
+ printf("\t\tUse <file> file with extra words (one per line)\n");
+ printf("\t-min-count <int>\n");
+ printf("\t\tThis will discard words that appear less than <int> times; default is 5\n");
+ printf("\t-alpha <float>\n");
+ printf("\t\tSet the starting learning rate; default is 0.025 for skip-gram and 0.05 for CBOW\n");
+ printf("\t-classes <int>\n");
+ printf("\t\tOutput word classes rather than word vectors; default number of classes is 0 (vectors are written)\n");
+ printf("\t-debug <int>\n");
+ printf("\t\tSet the debug mode (default = 2 = more info during training)\n");
+ printf("\t-binary <int>\n");
+ printf("\t\tSave the resulting vectors in binary moded; default is 0 (off)\n");
+ printf("\t-save-vocab <file>\n");
+ printf("\t\tThe vocabulary will be saved to <file>\n");
+ printf("\t-read-vocab <file>\n");
+ printf("\t\tThe vocabulary will be read from <file>, not constructed from the training data\n");
+ printf("\t-type <int>\n");
+ printf("\t\tType of embeddings (0 for cbow, 1 for skipngram, 2 for cwindow, 3 for structured skipngram, 4 for senna type)\n");
+ printf("\t-cap <int>\n");
+ printf("\t\tlimit the parameter values to the range [-50, 50]; default is 0 (off)\n");
+ printf("\nExamples:\n");
+ printf("./word2vec -train data.txt -output vec.txt -size 200 -window 5 -sample 1e-4 -negative 5 -hs 0 -binary 0 -type 1 -iter 3 -cngram-size 4 -extra_vocab_file extra.txt \n\n");
+ return 0;
+ }
+ output_file[0] = 0;
+ save_vocab_file[0] = 0;
+ read_vocab_file[0] = 0;
+ negative_classes_file[0] = 0;
+ if ((i = ArgPos((char *)"-size", argc, argv)) > 0) layer1_size = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-train", argc, argv)) > 0) strcpy(train_file, argv[i + 1]);
+ if ((i = ArgPos((char *)"-save-vocab", argc, argv)) > 0) strcpy(save_vocab_file, argv[i + 1]);
+ if ((i = ArgPos((char *)"-read-vocab", argc, argv)) > 0) strcpy(read_vocab_file, argv[i + 1]);
+ if ((i = ArgPos((char *)"-debug", argc, argv)) > 0) debug_mode = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-binary", argc, argv)) > 0) binary = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-type", argc, argv)) > 0) type = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-output", argc, argv)) > 0) strcpy(output_file, argv[i + 1]);
+ if ((i = ArgPos((char *)"-window", argc, argv)) > 0) window = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-sample", argc, argv)) > 0) sample = atof(argv[i + 1]);
+ if ((i = ArgPos((char *)"-hs", argc, argv)) > 0) hs = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-negative", argc, argv)) > 0) negative = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-negative-classes", argc, argv)) > 0) strcpy(negative_classes_file, argv[i + 1]);
+ if ((i = ArgPos((char *)"-nce", argc, argv)) > 0) nce = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-threads", argc, argv)) > 0) num_threads = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-iter", argc, argv)) > 0) iter = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-min-count", argc, argv)) > 0) min_count = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-classes", argc, argv)) > 0) classes = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-cap", argc, argv)) > 0) cap = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-cngram-size", argc, argv)) > 0) cngram_size = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-extra_vocab_file", argc, argv)) > 0) strcpy(extra_vocab_file, argv[i + 1]);
+ if (type==0 || type==2 || type==4) alpha = 0.05;
+ if ((i = ArgPos((char *)"-alpha", argc, argv)) > 0) alpha = atof(argv[i + 1]);
+ vocab = (struct vocab_word *)calloc(vocab_max_size, sizeof(struct vocab_word));
+ vocab_hash = (int *)calloc(vocab_hash_size, sizeof(int));
+ cngram_vocab = (struct vocab_word *)calloc(cngram_vocab_max_size, sizeof(struct vocab_word));
+ cngram_vocab_hash = (int *)calloc(vocab_hash_size, sizeof(int));
+ expTable = (real *)malloc((EXP_TABLE_SIZE + 1) * sizeof(real));
+ for (i = 0; i < EXP_TABLE_SIZE; i++) {
+ expTable[i] = exp((i / (real)EXP_TABLE_SIZE * 2 - 1) * MAX_EXP); // Precompute the exp() table
+ expTable[i] = expTable[i] / (expTable[i] + 1); // Precompute f(x) = x / (x + 1)
+ }
+ TrainModel();
+ return 0;
+}
+
diff --git a/compute-accuracy.c b/compute-accuracy.c
new file mode 100644
index 0000000..95a83e4
--- /dev/null
+++ b/compute-accuracy.c
@@ -0,0 +1,143 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+#include <ctype.h>
+
+const long long max_size = 2000; // max length of strings
+const long long N = 1; // number of closest words
+const long long max_w = 50; // max length of vocabulary entries
+
+int main(int argc, char **argv)
+{
+ FILE *f;
+ char st1[max_size], st2[max_size], st3[max_size], st4[max_size], bestw[N][max_size], file_name[max_size];
+ float dist, len, bestd[N], vec[max_size];
+ long long words, size, a, b, c, d, b1, b2, b3, threshold = 0;
+ float *M;
+ char *vocab;
+ int TCN, CCN = 0, TACN = 0, CACN = 0, SECN = 0, SYCN = 0, SEAC = 0, SYAC = 0, QID = 0, TQ = 0, TQS = 0;
+ if (argc < 2) {
+ printf("Usage: ./compute-accuracy <FILE> <threshold>\nwhere FILE contains word projections, and threshold is used to reduce vocabulary of the model for fast approximate evaluation (0 = off, otherwise typical value is 30000)\n");
+ return 0;
+ }
+ strcpy(file_name, argv[1]);
+ if (argc > 2) threshold = atoi(argv[2]);
+ f = fopen(file_name, "rb");
+ if (f == NULL) {
+ printf("Input file not found\n");
+ return -1;
+ }
+ fscanf(f, "%lld", &words);
+ if (threshold) if (words > threshold) words = threshold;
+ fscanf(f, "%lld", &size);
+ vocab = (char *)malloc(words * max_w * sizeof(char));
+ M = (float *)malloc(words * size * sizeof(float));
+ if (M == NULL) {
+ printf("Cannot allocate memory: %lld MB\n", words * size * sizeof(float) / 1048576);
+ return -1;
+ }
+ for (b = 0; b < words; b++) {
+ a = 0;
+ while (1) {
+ vocab[b * max_w + a] = fgetc(f);
+ if (feof(f) || (vocab[b * max_w + a] == ' ')) break;
+ if ((a < max_w) && (vocab[b * max_w + a] != '\n')) a++;
+ }
+ vocab[b * max_w + a] = 0;
+ for (a = 0; a < max_w; a++) vocab[b * max_w + a] = toupper(vocab[b * max_w + a]);
+ for (a = 0; a < size; a++) fread(&M[a + b * size], sizeof(float), 1, f);
+ len = 0;
+ for (a = 0; a < size; a++) len += M[a + b * size] * M[a + b * size];
+ len = sqrt(len);
+ for (a = 0; a < size; a++) M[a + b * size] /= len;
+ }
+ fclose(f);
+ TCN = 0;
+ while (1) {
+ for (a = 0; a < N; a++) bestd[a] = 0;
+ for (a = 0; a < N; a++) bestw[a][0] = 0;
+ scanf("%s", st1);
+ for (a = 0; a < strlen(st1); a++) st1[a] = toupper(st1[a]);
+ if ((!strcmp(st1, ":")) || (!strcmp(st1, "EXIT")) || feof(stdin)) {
+ if (TCN == 0) TCN = 1;
+ if (QID != 0) {
+ printf("ACCURACY TOP1: %.2f %% (%d / %d)\n", CCN / (float)TCN * 100, CCN, TCN);
+ printf("Total accuracy: %.2f %% Semantic accuracy: %.2f %% Syntactic accuracy: %.2f %% \n", CACN / (float)TACN * 100, SEAC / (float)SECN * 100, SYAC / (float)SYCN * 100);
+ }
+ QID++;
+ scanf("%s", st1);
+ if (feof(stdin)) break;
+ printf("%s:\n", st1);
+ TCN = 0;
+ CCN = 0;
+ continue;
+ }
+ if (!strcmp(st1, "EXIT")) break;
+ scanf("%s", st2);
+ for (a = 0; a < strlen(st2); a++) st2[a] = toupper(st2[a]);
+ scanf("%s", st3);
+ for (a = 0; a<strlen(st3); a++) st3[a] = toupper(st3[a]);
+ scanf("%s", st4);
+ for (a = 0; a < strlen(st4); a++) st4[a] = toupper(st4[a]);
+ for (b = 0; b < words; b++) if (!strcmp(&vocab[b * max_w], st1)) break;
+ b1 = b;
+ for (b = 0; b < words; b++) if (!strcmp(&vocab[b * max_w], st2)) break;
+ b2 = b;
+ for (b = 0; b < words; b++) if (!strcmp(&vocab[b * max_w], st3)) break;
+ b3 = b;
+ for (a = 0; a < N; a++) bestd[a] = 0;
+ for (a = 0; a < N; a++) bestw[a][0] = 0;
+ TQ++;
+ if (b1 == words) continue;
+ if (b2 == words) continue;
+ if (b3 == words) continue;
+ for (b = 0; b < words; b++) if (!strcmp(&vocab[b * max_w], st4)) break;
+ if (b == words) continue;
+ for (a = 0; a < size; a++) vec[a] = (M[a + b2 * size] - M[a + b1 * size]) + M[a + b3 * size];
+ TQS++;
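+ // Vector-offset analogy: for a question "a : b :: c : d" (e.g. "Athens
+ // Greece Baghdad Iraq" from the word2vec test set), the prediction is the
+ // vocabulary row with the highest cosine similarity to vec = (b - a) + c,
+ // with a, b and c themselves excluded from the search below.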
+ for (c = 0; c < words; c++) {
+ if (c == b1) continue;
+ if (c == b2) continue;
+ if (c == b3) continue;
+ dist = 0;
+ for (a = 0; a < size; a++) dist += vec[a] * M[a + c * size];
+ for (a = 0; a < N; a++) {
+ if (dist > bestd[a]) {
+ for (d = N - 1; d > a; d--) {
+ bestd[d] = bestd[d - 1];
+ strcpy(bestw[d], bestw[d - 1]);
+ }
+ bestd[a] = dist;
+ strcpy(bestw[a], &vocab[c * max_w]);
+ break;
+ }
+ }
+ }
+ if (!strcmp(st4, bestw[0])) {
+ CCN++;
+ CACN++;
+ if (QID <= 5) SEAC++; else SYAC++;
+ }
+ if (QID <= 5) SECN++; else SYCN++;
+ TCN++;
+ TACN++;
+ }
+ printf("Questions seen / total: %d %d %.2f %% \n", TQS, TQ, TQS/(float)TQ*100);
+ return 0;
+}
diff --git a/distance.c b/distance.c
new file mode 100644
index 0000000..e01ba9b
--- /dev/null
+++ b/distance.c
@@ -0,0 +1,164 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdio.h>
+#include <string.h>
+#include <math.h>
+#include <stdlib.h>
+
+const long long max_size = 2000; // max length of strings
+const long long N = 40; // number of closest words that will be shown
+const long long max_w = 50; // max length of vocabulary entries
+
+#define MAX_STRING 100
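+// Reads a single word from a file, assuming space + tab + EOL to be word boundaries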
+void ReadWord(char *word, FILE *fin) {
+ int a = 0, ch;
+ while (!feof(fin)) {
+ ch = fgetc(fin);
+ if (ch == 13) continue;
+ if ((ch == ' ') || (ch == '\t') || (ch == '\n')) {
+ if (a > 0) {
+ if (ch == '\n') ungetc(ch, fin);
+ break;
+ }
+ if (ch == '\n') {
+ strcpy(word, (char *)"</s>");
+ return;
+ } else continue;
+ }
+ word[a] = ch;
+ a++;
+ if (a >= MAX_STRING - 1) a--; // Truncate words that are too long
+ }
+ word[a] = 0;
+}
+
+int main(int argc, char **argv) {
+ FILE *f;
+ char st1[max_size];
+ char *bestw[N];
+ char file_name[max_size], st[100][max_size];
+ float dist, len, bestd[N], vec[max_size];
+ long long words, size, a, b, c, d, cn, bi[100];
+ float *M;
+ char *vocab;
+ if (argc < 2) {
+ printf("Usage: ./distance <FILE>\nwhere FILE contains word projections in the BINARY FORMAT\n");
+ return 0;
+ }
+ strcpy(file_name, argv[1]);
+ f = fopen(file_name, "rb");
+ if (f == NULL) {
+ printf("Input file not found\n");
+ return -1;
+ }
+ fscanf(f, "%lld", &words);
+ fscanf(f, "%lld", &size);
+ vocab = (char *)malloc((long long)words * max_w * sizeof(char));
+ for (a = 0; a < N; a++) bestw[a] = (char *)malloc(max_size * sizeof(char));
+ M = (float *)malloc((long long)words * (long long)size * sizeof(float));
+ if (M == NULL) {
+ printf("Cannot allocate memory: %lld MB %lld %lld\n", (long long)words * size * sizeof(float) / 1048576, words, size);
+ return -1;
+ }
+ for (b = 0; b < words; b++) {
+ a = 0;
+ while (1) {
+ vocab[b * max_w + a] = fgetc(f);
+ if (feof(f) || (vocab[b * max_w + a] == ' ')) break;
+ if ((a < max_w) && (vocab[b * max_w + a] != '\n')) a++;
+ }
+ vocab[b * max_w + a] = 0;
+ for (a = 0; a < size; a++) fread(&M[a + b * size], sizeof(float), 1, f);
+ len = 0;
+ for (a = 0; a < size; a++) len += M[a + b * size] * M[a + b * size];
+ len = sqrt(len);
+ for (a = 0; a < size; a++) M[a + b * size] /= len;
+ }
+ fclose(f);
+ while (1) {
+ for (a = 0; a < N; a++) bestd[a] = 0;
+ for (a = 0; a < N; a++) bestw[a][0] = 0;
+ printf("Enter word or sentence (EXIT to break): ");
+ a = 0;
+ while (1) {
+ st1[a] = fgetc(stdin);
+ if ((st1[a] == '\n') || (a >= max_size - 1)) {
+ st1[a] = 0;
+ break;
+ }
+ a++;
+ }
+ if (!strcmp(st1, "EXIT")) break;
+ cn = 0;
+ b = 0;
+ c = 0;
+ while (1) {
+ st[cn][b] = st1[c];
+ b++;
+ c++;
+ st[cn][b] = 0;
+ if (st1[c] == 0) break;
+ if (st1[c] == ' ') {
+ cn++;
+ b = 0;
+ c++;
+ }
+ }
+ cn++;
+ for (a = 0; a < cn; a++) {
+ for (b = 0; b < words; b++) if (!strcmp(&vocab[b * max_w], st[a])) break;
+ if (b == words) b = -1;
+ bi[a] = b;
+ printf("\nWord: %s Position in vocabulary: %lld\n", st[a], bi[a]);
+ if (b == -1) {
+ printf("Out of dictionary word!\n");
+ break;
+ }
+ }
+ if (b == -1) continue;
+ printf("\n Word Cosine distance\n------------------------------------------------------------------------\n");
+ for (a = 0; a < size; a++) vec[a] = 0;
+ for (b = 0; b < cn; b++) {
+ if (bi[b] == -1) continue;
+ for (a = 0; a < size; a++) vec[a] += M[a + bi[b] * size];
+ }
+ len = 0;
+ for (a = 0; a < size; a++) len += vec[a] * vec[a];
+ len = sqrt(len);
+ for (a = 0; a < size; a++) vec[a] /= len;
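+ // A multi-word query is embedded as the normalized sum of its word vectors,
+ // so phrases go through the same cosine ranking as single words.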
+ for (a = 0; a < N; a++) bestd[a] = -1;
+ for (a = 0; a < N; a++) bestw[a][0] = 0;
+ for (c = 0; c < words; c++) {
+ a = 0;
+ for (b = 0; b < cn; b++) if (bi[b] == c) a = 1;
+ if (a == 1) continue;
+ dist = 0;
+ for (a = 0; a < size; a++) dist += vec[a] * M[a + c * size];
+ for (a = 0; a < N; a++) {
+ if (dist > bestd[a]) {
+ for (d = N - 1; d > a; d--) {
+ bestd[d] = bestd[d - 1];
+ strcpy(bestw[d], bestw[d - 1]);
+ }
+ bestd[a] = dist;
+ strcpy(bestw[a], &vocab[c * max_w]);
+ break;
+ }
+ }
+ }
+ for (a = 0; a < N; a++) printf("%50s\t\t%f\n", bestw[a], bestd[a]);
+ }
+ return 0;
+}
diff --git a/distance_fast.c b/distance_fast.c
new file mode 100644
index 0000000..8b143e9
--- /dev/null
+++ b/distance_fast.c
@@ -0,0 +1,261 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdio.h>
+#include <string.h>
+#include <math.h>
+#include <stdlib.h>
+#include <time.h>
+
+const long long max_size = 2000; // max length of strings
+const long long N = 10; // number of closest words that will be shown
+const long long max_w = 50; // max length of vocabulary entries
+
+#define MAX_STRING 100
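+// Reads a single word from a file, assuming space + tab + EOL to be word boundaries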
+void ReadWord(char *word, FILE *fin) {
+ int a = 0, ch;
+ while (!feof(fin)) {
+ ch = fgetc(fin);
+ if (ch == 13) continue;
+ if ((ch == ' ') || (ch == '\t') || (ch == '\n')) {
+ if (a > 0) {
+ if (ch == '\n') ungetc(ch, fin);
+ break;
+ }
+ if (ch == '\n') {
+ strcpy(word, (char *)"</s>");
+ return;
+ } else continue;
+ }
+ word[a] = ch;
+ a++;
+ if (a >= MAX_STRING - 1) a--; // Truncate words that are too long
+ }
+ word[a] = 0;
+}
+
+int main(int argc, char **argv) {
+ FILE *f;
+ char st1[max_size];
+ char *bestw[N];
+ char file_name[max_size], st[100][max_size];
+ float dist, len, bestd[N], bestclasses[N], vec[max_size];
+ int bestclasses_ids[N];
+ long long words, size, a, b, c, d, e, cn, bi[100];
+ float *M;
+ char *vocab;
+ char word[MAX_STRING];
+ clock_t begin;
+ if (argc < 3) {
+ printf("Usage: ./distance_fast <FILE> <number_of_classes>\nwhere FILE contains features in text format and <number_of_classes> is the number of k-means clusters used to speed up the search\n");
+ return 0;
+ }
+ strcpy(file_name, argv[1]);
+ int classes = atoi(argv[2]);
+ f = fopen(file_name, "rb");
+ if (f == NULL) {
+ printf("Input file not found\n");
+ return -1;
+ }
+
+ printf("reading data\n");
+ ReadWord(word, f);
+ words = atoi(word);
+ ReadWord(word, f);
+ size = atoi(word);
+ vocab = (char *)malloc((long long)words * max_w * sizeof(char));
+ for (a = 0; a < N; a++) bestw[a] = (char *)malloc(max_size * sizeof(char));
+ M = (float *)malloc((long long)words * (long long)size * sizeof(float));
+ if (M == NULL) {
+ printf("Cannot allocate memory: %lld MB %lld %lld\n", (long long)words * size * sizeof(float) / 1048576, words, size);
+ return -1;
+ }
+ for (b = 0; b < words; b++) {
+ a = 0;
+ while (1) {
+ vocab[b * max_w + a] = fgetc(f);
+ if (feof(f) || (vocab[b * max_w + a] == ' ')) break;
+ if ((a < max_w) && (vocab[b * max_w + a] != '\n')) a++;
+ }
+ vocab[b * max_w + a] = 0;
+ for (a = 0; a < size; a++) {
+ ReadWord(word,f);
+ M[a + b * size] = atof(word);
+ }
+ len = 0;
+ for (a = 0; a < size; a++) len += M[a + b * size] * M[a + b * size];
+ len = sqrt(len);
+ for (a = 0; a < size; a++) M[a + b * size] /= len;
+ }
+ fclose(f);
+
+ //run kmeans
+ printf("running k-means with %i classes...\n",classes);
+ int clcn = classes, iter = 10, closeid;
+ int *centcn = (int *)malloc(classes * sizeof(int));
+ int *cl = (int *)calloc(words, sizeof(int));
+ float closev, x;
+ float *cent = (float *)calloc(classes * size, sizeof(float));
+ for (a = 0; a < words; a++) cl[a] = a % clcn;
+ for (a = 0; a < iter; a++) {
+ for (b = 0; b < clcn * size; b++) cent[b] = 0;
+ for (b = 0; b < clcn; b++) centcn[b] = 1;
+ for (c = 0; c < words; c++) {
+ for (d = 0; d < size; d++) cent[size * cl[c] + d] += M[c * size + d];
+ centcn[cl[c]]++;
+ }
+ for (b = 0; b < clcn; b++) {
+ closev = 0;
+ for (c = 0; c < size; c++) {
+ cent[size * b + c] /= centcn[b];
+ closev += cent[size * b + c] * cent[size * b + c];
+ }
+ closev = sqrt(closev);
+ for (c = 0; c < size; c++) cent[size * b + c] /= closev;
+ }
+ for (c = 0; c < words; c++) {
+ closev = -10;
+ closeid = 0;
+ for (d = 0; d < clcn; d++) {
+ x = 0;
+ for (b = 0; b < size; b++) x += cent[size * d + b] * M[c * size + b];
+ if (x > closev) {
+ closev = x;
+ closeid = d;
+ }
+ }
+ cl[c] = closeid;
+ }
+ }
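+ // Each pass above is one round of spherical k-means: sum the unit-length
+ // word vectors of each cluster into its centroid (counts start at 1 as a
+ // small guard against empty clusters), renormalize the centroid, then
+ // reassign every word to the centroid with the highest dot product, i.e.
+ // the highest cosine similarity.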
+
+ // build an array of words ordered by class and their offsets (index where each class starts)
+ int class_words[words];
+ int class_offsets[classes];
+ for(a = 0; a < classes; a++) class_offsets[a]=0;
+ for(a = 0; a < words; a++) class_offsets[cl[a]]++;
+ for(a = 1; a < classes; a++) class_offsets[a] += class_offsets[a-1];
+ for(a = 0; a < words; a++) class_words[--class_offsets[cl[a]]] = a;
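+ // This is a counting sort: count members per class, turn the counts into
+ // prefix sums, then place each word while decrementing its class offset, so
+ // class_offsets[k] ends up as the index in class_words where class k starts.
+ // Tiny sketch for cl = {1, 0, 1}: counts {1, 2} -> prefix sums {1, 3} ->
+ // class_words = {1, 2, 0} and class_offsets = {0, 1}. Note class_words and
+ // class_offsets are stack VLAs, so very large vocabularies may prefer heap
+ // allocation here.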
+
+ //reading from input
+ while (1) {
+ for (a = 0; a < N; a++) bestd[a] = 0;
+ for (a = 0; a < N; a++) bestclasses[a] = 0;
+ for (a = 0; a < N; a++) bestw[a][0] = 0;
+ printf("Enter word or sentence (EXIT to break): ");
+ a = 0;
+ while (1) {
+ st1[a] = fgetc(stdin);
+ if ((st1[a] == '\n') || (a >= max_size - 1)) {
+ st1[a] = 0;
+ break;
+ }
+ a++;
+ }
+ if (!strcmp(st1, "EXIT")) break;
+ cn = 0;
+ b = 0;
+ c = 0;
+ while (1) {
+ st[cn][b] = st1[c];
+ b++;
+ c++;
+ st[cn][b] = 0;
+ if (st1[c] == 0) break;
+ if (st1[c] == ' ') {
+ cn++;
+ b = 0;
+ c++;
+ }
+ }
+ cn++;
+ for (a = 0; a < cn; a++) {
+ for (b = 0; b < words; b++) if (!strcmp(&vocab[b * max_w], st[a])) break;
+ if (b == words) b = -1;
+ bi[a] = b;
+ printf("\nWord: %s Position in vocabulary: %lld\n", st[a], bi[a]);
+ if (b == -1) {
+ printf("Out of dictionary word!\n");
+ break;
+ }
+ }
+ if (b == -1) continue;
+ begin = clock();
+
+ printf("\n Word Cosine distance\n------------------------------------------------------------------------\n");
+
+ for (a = 0; a < size; a++) vec[a] = 0;
+ for (b = 0; b < cn; b++) {
+ if (bi[b] == -1) continue;
+ for (a = 0; a < size; a++) vec[a] += M[a + bi[b] * size];
+ }
+
+ len = 0;
+ for (a = 0; a < size; a++) len += vec[a] * vec[a];
+ len = sqrt(len);
+ for (a = 0; a < size; a++) vec[a] /= len;
+
+ // find top N centroids
+ for (a = 0; a < N; a++) bestclasses[a] = -1;
+ for (a = 0; a < N; a++) bestclasses_ids[a] = -1;
+ for (c = 0; c < classes; c++){
+ dist = 0;
+ for (a = 0; a < size; a++) dist += vec[a] * cent[a + size * c];
+ for (a = 0; a < N; a++) {
+ if (dist > bestclasses[a]) {
+ for(d = N - 1; d > a; d--){
+ bestclasses[d] = bestclasses[d-1];
+ bestclasses_ids[d] = bestclasses_ids[d-1];
+ }
+ bestclasses[a] = dist;
+ bestclasses_ids[a] = c;
+ break;
+ }
+ }
+ }
+
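+ // Two-stage approximate search: rank the `classes` centroids first, then
+ // scan only words in the top-N clusters. With balanced clusters that is
+ // roughly classes + N * words / classes dot products per query instead of
+ // scoring all `words` vectors as distance.c does.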
+ // find top N words in the centroids
+ for (a = 0; a < N; a++) bestd[a] = -1;
+ for (a = 0; a < N; a++) bestw[a][0] = 0;
+ for (a = 0; a < N; a++){
+ c = words;
+ if(bestclasses_ids[a] < classes-1) c = class_offsets[bestclasses_ids[a]+1];
+ b = class_offsets[bestclasses_ids[a]];
+ for(; b < c; b++){
+ dist = 0;
+ for (d = 0; d < size; d++) dist += vec[d] * M[d + class_words[b] * size];
+ for (d = 0; d < N; d++){
+ if(dist > bestd[d]){
+ for (e = N -1; e > d; e--){
+ bestd[e] = bestd[e-1];
+ strcpy(bestw[e], bestw[e-1]);
+ }
+ bestd[d] = dist;
+ strcpy(bestw[d], &vocab[class_words[b] * max_w]);
+ break;
+ }
+ }
+ }
+ }
+ for (a = 0; a < N; a++) printf("%50s\t\t%f\n", bestw[a], bestd[a]);
+ printf("time spent = %f seconds\n", (double)(clock() - begin) / CLOCKS_PER_SEC);
+ }
+
+ free(centcn);
+ free(cent);
+ free(cl);
+
+ return 0;
+}
diff --git a/distance_txt.c b/distance_txt.c
new file mode 100644
index 0000000..da5b67e
--- /dev/null
+++ b/distance_txt.c
@@ -0,0 +1,175 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdio.h>
+#include <string.h>
+#include <math.h>
+#include <stdlib.h>
+#include <time.h>
+
+const long long max_size = 2000; // max length of strings
+const long long N = 40; // number of closest words that will be shown
+const long long max_w = 50; // max length of vocabulary entries
+
+#define MAX_STRING 100
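+// Reads a single word from a file, assuming space + tab + EOL to be word boundaries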
+void ReadWord(char *word, FILE *fin) {
+ int a = 0, ch;
+ while (!feof(fin)) {
+ ch = fgetc(fin);
+ if (ch == 13) continue;
+ if ((ch == ' ') || (ch == '\t') || (ch == '\n')) {
+ if (a > 0) {
+ if (ch == '\n') ungetc(ch, fin);
+ break;
+ }
+ if (ch == '\n') {
+ strcpy(word, (char *)"</s>");
+ return;
+ } else continue;
+ }
+ word[a] = ch;
+ a++;
+ if (a >= MAX_STRING - 1) a--; // Truncate words that are too long
+ }
+ word[a] = 0;
+}
+
+int main(int argc, char **argv) {
+ FILE *f;
+ char st1[max_size];
+ char *bestw[N];
+ char file_name[max_size], st[100][max_size];
+ float dist, len, bestd[N], vec[max_size];
+ long long words, size, a, b, c, d, cn, bi[100];
+ float *M;
+ char *vocab;
+ char word[MAX_STRING];
+ clock_t begin;
+ if (argc < 2) {
+ printf("Usage: ./distance <FILE>\nwhere FILE contains word projections in the BINARY FORMAT\n");
+ return 0;
+ }
+ strcpy(file_name, argv[1]);
+ f = fopen(file_name, "rb");
+ if (f == NULL) {
+ printf("Input file not found\n");
+ return -1;
+ }
+ ReadWord(word, f);
+ words = atoi(word);
+ ReadWord(word, f);
+ size = atoi(word);
+ vocab = (char *)malloc((long long)words * max_w * sizeof(char));
+ for (a = 0; a < N; a++) bestw[a] = (char *)malloc(max_size * sizeof(char));
+ M = (float *)malloc((long long)words * (long long)size * sizeof(float));
+ if (M == NULL) {
+ printf("Cannot allocate memory: %lld MB %lld %lld\n", (long long)words * size * sizeof(float) / 1048576, words, size);
+ return -1;
+ }
+ for (b = 0; b < words; b++) {
+ a = 0;
+ while (1) {
+ vocab[b * max_w + a] = fgetc(f);
+ if (feof(f) || (vocab[b * max_w + a] == ' ')) break;
+ if ((a < max_w) && (vocab[b * max_w + a] != '\n')) a++;
+ }
+ vocab[b * max_w + a] = 0;
+ for (a = 0; a < size; a++) {
+ ReadWord(word,f);
+ M[a + b * size] = atof(word);
+ }
+ len = 0;
+ for (a = 0; a < size; a++) len += M[a + b * size] * M[a + b * size];
+ len = sqrt(len);
+ for (a = 0; a < size; a++) M[a + b * size] /= len;
+ }
+ fclose(f);
+ while (1) {
+ for (a = 0; a < N; a++) bestd[a] = 0;
+ for (a = 0; a < N; a++) bestw[a][0] = 0;
+ printf("Enter word or sentence (EXIT to break): ");
+ a = 0;
+ while (1) {
+ st1[a] = fgetc(stdin);
+ if ((st1[a] == '\n') || (a >= max_size - 1)) {
+ st1[a] = 0;
+ break;
+ }
+ a++;
+ }
+ if (!strcmp(st1, "EXIT")) break;
+ cn = 0;
+ b = 0;
+ c = 0;
+ while (1) {
+ st[cn][b] = st1[c];
+ b++;
+ c++;
+ st[cn][b] = 0;
+ if (st1[c] == 0) break;
+ if (st1[c] == ' ') {
+ cn++;
+ b = 0;
+ c++;
+ }
+ }
+ cn++;
+ for (a = 0; a < cn; a++) {
+ for (b = 0; b < words; b++) if (!strcmp(&vocab[b * max_w], st[a])) break;
+ if (b == words) b = -1;
+ bi[a] = b;
+ printf("\nWord: %s Position in vocabulary: %lld\n", st[a], bi[a]);
+ if (b == -1) {
+ printf("Out of dictionary word!\n");
+ break;
+ }
+ }
+ if (b == -1) continue;
+ begin = clock();
+
+ printf("\n Word Cosine distance\n------------------------------------------------------------------------\n");
+ for (a = 0; a < size; a++) vec[a] = 0;
+ for (b = 0; b < cn; b++) {
+ if (bi[b] == -1) continue;
+ for (a = 0; a < size; a++) vec[a] += M[a + bi[b] * size];
+ }
+ len = 0;
+ for (a = 0; a < size; a++) len += vec[a] * vec[a];
+ len = sqrt(len);
+ for (a = 0; a < size; a++) vec[a] /= len;
+ for (a = 0; a < N; a++) bestd[a] = -1;
+ for (a = 0; a < N; a++) bestw[a][0] = 0;
+ for (c = 0; c < words; c++) {
+ a = 0;
+ for (b = 0; b < cn; b++) if (bi[b] == c) a = 1;
+ if (a == 1) continue;
+ dist = 0;
+ for (a = 0; a < size; a++) dist += vec[a] * M[a + c * size];
+ for (a = 0; a < N; a++) {
+ if (dist > bestd[a]) {
+ for (d = N - 1; d > a; d--) {
+ bestd[d] = bestd[d - 1];
+ strcpy(bestw[d], bestw[d - 1]);
+ }
+ bestd[a] = dist;
+ strcpy(bestw[a], &vocab[c * max_w]);
+ break;
+ }
+ }
+ }
+ for (a = 0; a < N; a++) printf("%50s\t\t%f\n", bestw[a], bestd[a]);
+ printf("time spent = %f seconds\n", (double)(clock() - begin) / CLOCKS_PER_SEC);
+ }
+ return 0;
+}
diff --git a/kmeans_txt.c b/kmeans_txt.c
new file mode 100644
index 0000000..16934ab
--- /dev/null
+++ b/kmeans_txt.c
@@ -0,0 +1,161 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdio.h>
+#include <string.h>
+#include <math.h>
+#include <stdlib.h>
+
+const long long max_size = 2000; // max length of strings
+const long long N = 40; // number of closest words that will be shown
+const long long max_w = 50; // max length of vocabulary entries
+
+#define MAX_STRING 100
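+// Reads a single word from a file, assuming space + tab + EOL to be word boundaries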
+void ReadWord(char *word, FILE *fin) {
+ int a = 0, ch;
+ while (!feof(fin)) {
+ ch = fgetc(fin);
+ if (ch == 13) continue;
+ if ((ch == ' ') || (ch == '\t') || (ch == '\n')) {
+ if (a > 0) {
+ if (ch == '\n') ungetc(ch, fin);
+ break;
+ }
+ if (ch == '\n') {
+ strcpy(word, (char *)"</s>");
+ return;
+ } else continue;
+ }
+ word[a] = ch;
+ a++;
+ if (a >= MAX_STRING - 1) a--; // Truncate words that are too long
+ }
+ word[a] = 0;
+}
+
+int main(int argc, char **argv) {
+ FILE *f;
+ char file_name[max_size], output_file[max_size];
+ float len;
+ long long words, size, a, b, c, d;
+ float *M;
+ char *vocab;
+ char word[MAX_STRING];
+ if (argc < 4) {
+ printf("Usage: ./kmeans_txt <FILE> <OUTPUT> <number_of_classes>\nwhere FILE contains features in text format, OUTPUT is the class assignment file to write, and <number_of_classes> is the number of k-means clusters\n");
+ return 0;
+ }
+ strcpy(file_name, argv[1]);
+ strcpy(output_file, argv[2]);
+ int classes = atoi(argv[3]);
+ f = fopen(file_name, "rb");
+ if (f == NULL) {
+ printf("Input file not found\n");
+ return -1;
+ }
+
+ FILE *fo = fopen(output_file, "wb");
+
+ ReadWord(word, f);
+ words = atoi(word);
+ ReadWord(word, f);
+ size = atoi(word);
+ vocab = (char *)malloc((long long)words * max_w * sizeof(char));
+ M = (float *)malloc((long long)words * (long long)size * sizeof(float));
+ if (M == NULL) {
+ printf("Cannot allocate memory: %lld MB %lld %lld\n", (long long)words * size * sizeof(float) / 1048576, words, size);
+ return -1;
+ }
+ for (b = 0; b < words; b++) {
+ a = 0;
+ while (1) {
+ vocab[b * max_w + a] = fgetc(f);
+ if (feof(f) || (vocab[b * max_w + a] == ' ')) break;
+ if ((a < max_w) && (vocab[b * max_w + a] != '\n')) a++;
+ }
+ vocab[b * max_w + a] = 0;
+ for (a = 0; a < size; a++) {
+ ReadWord(word,f);
+ M[a + b * size] = atof(word);
+ }
+ len = 0;
+ for (a = 0; a < size; a++) len += M[a + b * size] * M[a + b * size];
+ len = sqrt(len);
+ for (a = 0; a < size; a++) M[a + b * size] /= len;
+ }
+ fclose(f);
+
+ //run kmeans
+ int clcn = classes, iter = 2, closeid;
+ int *centcn = (int *)malloc(classes * sizeof(int));
+ int *cl = (int *)calloc(words, sizeof(int));
+ float closev, x;
+ float *cent = (float *)calloc(classes * size, sizeof(float));
+ for (a = 0; a < words; a++) cl[a] = a % clcn;
+ for (a = 0; a < iter; a++) {
+ for (b = 0; b < clcn * size; b++) cent[b] = 0;
+ for (b = 0; b < clcn; b++) centcn[b] = 1;
+ for (c = 0; c < words; c++) {
+ for (d = 0; d < size; d++) cent[size * cl[c] + d] += M[c * size + d];
+ centcn[cl[c]]++;
+ }
+ for (b = 0; b < clcn; b++) {
+ closev = 0;
+ for (c = 0; c < size; c++) {
+ cent[size * b + c] /= centcn[b];
+ closev += cent[size * b + c] * cent[size * b + c];
+ }
+ closev = sqrt(closev);
+ for (c = 0; c < size; c++) cent[size * b + c] /= closev;
+ }
+ for (c = 0; c < words; c++) {
+ closev = -10;
+ closeid = 0;
+ for (d = 0; d < clcn; d++) {
+ x = 0;
+ for (b = 0; b < size; b++) x += cent[size * d + b] * M[c * size + b];
+ if (x > closev) {
+ closev = x;
+ closeid = d;
+ }
+ }
+ cl[c] = closeid;
+ }
+ }
+
+ // build an array of words ordered by class and their offsets (index where each class starts)
+ int class_words[words];
+ int class_offsets[classes];
+ for(a = 0; a < classes; a++) class_offsets[a]=0;
+ for(a = 0; a < words; a++) class_offsets[cl[a]]++;
+ for(a = 1; a < classes; a++) class_offsets[a] += class_offsets[a-1];
+ for(a = 0; a < words; a++) class_words[--class_offsets[cl[a]]] = a;
+
+ for (a = 0; a < classes; a++){
+ c = words;
+ if(a < classes-1) c = class_offsets[a+1];
+ b = class_offsets[a];
+ for(; b < c; b++){
+ fprintf(fo, "%lld %s\n", a ,&vocab[class_words[b] * max_w]);
+ }
+ }
+ // Save the K-means classes
+ //for (a = 0; a < words; a++) fprintf(fo, "%s %d\n", &vocab[a * max_w], cl[a]);
+ free(centcn);
+ free(cent);
+ free(cl);
+ free(M);
+ free(vocab);
+ return 0;
+}
diff --git a/makefile b/makefile
new file mode 100644
index 0000000..0ea6193
--- /dev/null
+++ b/makefile
@@ -0,0 +1,32 @@
+CC = gcc
+#Using -Ofast instead of -O3 might result in faster code, but is supported only by newer GCC versions
+CFLAGS = -lm -pthread -O3 -march=k8 -mtune=k8 -Wall -funroll-loops
+#CFLAGS = -m64 -march=k8 -mtune=k8 -lm -pthread -O3 -Wall -funroll-loops
+
+
+all: word2vec cngram2vec weightedWord2vec wordless2vec word2phrase distance word-analogy compute-accuracy distance_txt distance_fast kmeans_txt
+
+word2vec : word2vecExt.c
+ $(CC) word2vecExt.c -o word2vec $(CFLAGS)
+weightedWord2vec : weightedWord2vec.c
+ $(CC) weightedWord2vec.c -o weightedWord2vec $(CFLAGS)
+cngram2vec : cngram2vec.c
+ $(CC) cngram2vec.c -o cngram2vec $(CFLAGS)
+wordless2vec : wordless2vec.c
+ $(CC) wordless2vec.c -o wordless2vec $(CFLAGS)
+word2phrase : word2phrase.c
+ $(CC) word2phrase.c -o word2phrase $(CFLAGS)
+distance : distance.c
+ $(CC) distance.c -o distance $(CFLAGS)
+distance_txt : distance_txt.c
+ $(CC) distance_txt.c -o distance_txt $(CFLAGS)
+distance_fast : distance_fast.c
+ $(CC) distance_fast.c -o distance_fast $(CFLAGS)
+kmeans_txt : kmeans_txt.c
+ $(CC) kmeans_txt.c -o kmeans_txt $(CFLAGS)
+word-analogy : word-analogy.c
+ $(CC) word-analogy.c -o word-analogy $(CFLAGS)
+compute-accuracy : compute-accuracy.c
+ $(CC) compute-accuracy.c -o compute-accuracy $(CFLAGS)
+clean:
+ rm -rf word2vec weightedWord2vec cngram2vec wordless2vec word2phrase distance word-analogy compute-accuracy distance_txt distance_fast kmeans_txt
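+# Example session (hypothetical file names; questions-words.txt is the
+# standard word2vec analogy set):
+#   make
+#   ./distance vectors.bin
+#   ./compute-accuracy vectors.bin 30000 < questions-words.txt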
diff --git a/weightedWord2vec.c b/weightedWord2vec.c
new file mode 100644
index 0000000..f1d8f60
--- /dev/null
+++ b/weightedWord2vec.c
@@ -0,0 +1,1317 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+#include <pthread.h>
+
+#define MAX_STRING 100
+#define EXP_TABLE_SIZE 1000
+#define MAX_EXP 6
+#define MAX_SENTENCE_LENGTH 1000
+#define MAX_CODE_LENGTH 40
+
+const int vocab_hash_size = 30000000; // Maximum 30 * 0.7 = 21M words in the vocabulary
+
+typedef float real; // Precision of float numbers
+
+struct vocab_word {
+ long long cn;
+ int *point;
+ char *word, *code, codelen;
+};
+
+char train_file[MAX_STRING], output_file[MAX_STRING];
+char save_vocab_file[MAX_STRING], read_vocab_file[MAX_STRING];
+struct vocab_word *vocab;
+int binary = 0, type = 1, debug_mode = 2, window = 5, min_count = 5, num_threads = 12, min_reduce = 1;
+int *vocab_hash;
+long long vocab_max_size = 1000, vocab_size = 0, layer1_size = 100;
+long long train_words = 0, word_count_actual = 0, iter = 5, file_size = 0, classes = 0;
+real alpha = 0.025, starting_alpha, sample = 1e-3;
+real *syn0, *syn1, *syn1neg, *syn1nce, *expTable;
+clock_t start;
+
+real *syn1_window, *syn1neg_window, *syn1nce_window;
+int w_offset, window_layer_size;
+
+int window_hidden_size = 500;
+real *syn_window_hidden, *syn_hidden_word, *syn_hidden_word_neg, *syn_hidden_word_nce;
+
+int hs = 0, negative = 5;
+const int table_size = 1e8;
+int *table;
+
+// contrastive negative sampling
+char negative_classes_file[MAX_STRING];
+int *word_to_group;
+int *group_to_table; //group_size*table_size
+int class_number;
+
+//nce
+real* noise_distribution;
+int nce = 0;
+
+//param caps
+real CAP_VALUE = 50;
+int cap = 0;
+
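+// Clamps one parameter to [-CAP_VALUE, CAP_VALUE]; applied after gradient
+// updates when the cap option is enabled, keeping weights from growing
+// without bound.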
+void capParam(real* array, int index){
+ if(array[index] > CAP_VALUE)
+ array[index] = CAP_VALUE;
+ else if(array[index] < -CAP_VALUE)
+ array[index] = -CAP_VALUE;
+}
+
+real hardTanh(real x){
+ if(x>=1){
+ return 1;
+ }
+ else if(x<=-1){
+ return -1;
+ }
+ else{
+ return x;
+ }
+}
+
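+// Gradient gate for hardTanh: returns 0 when the activation is saturated and
+// the gradient g would push it further outside [-1, 1], otherwise passes the
+// gradient through with slope 1.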
+real dHardTanh(real x, real g){
+ if(x > 1 && g > 0){
+ return 0;
+ }
+ if(x < -1 && g < 0){
+ return 0;
+ }
+ return 1;
+}
+
+int isEndOfSentence(char* word){
+ return strcmp("</s>", word) == 0;
+}
+
+void InitUnigramTable() {
+ int a, i;
+ long long train_words_pow = 0;
+ real d1, power = 0.75;
+ table = (int *)malloc(table_size * sizeof(int));
+ for (a = 0; a < vocab_size; a++) train_words_pow += pow(vocab[a].cn, power);
+ i = 0;
+ d1 = pow(vocab[i].cn, power) / (real)train_words_pow;
+ for (a = 0; a < table_size; a++) {
+ table[a] = i;
+ if (a / (real)table_size > d1) {
+ i++;
+ d1 += pow(vocab[i].cn, power) / (real)train_words_pow;
+ }
+ if (i >= vocab_size) i = vocab_size - 1;
+ }
+
+ noise_distribution = (real *)calloc(vocab_size, sizeof(real));
+ for (a = 0; a < vocab_size; a++) noise_distribution[a] = pow(vocab[a].cn, power)/(real)train_words_pow;
+}
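+// The table realizes the smoothed unigram distribution P(w) ~ cn(w)^0.75:
+// word i occupies a share of the table_size slots proportional to its
+// smoothed probability, so drawing a negative sample is a single uniform
+// index, as in table[(next_random >> 16) % table_size]. noise_distribution
+// keeps the same probabilities in normalized form for the NCE objective.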
+
+// Reads a single word from a file, assuming space + tab + EOL to be word boundaries
+void ReadWord(char *word, FILE *fin) {
+ int a = 0, ch;
+ while (!feof(fin)) {
+ ch = fgetc(fin);
+ if (ch == 13) continue;
+ if ((ch == ' ') || (ch == '\t') || (ch == '\n')) {
+ if (a > 0) {
+ if (ch == '\n') ungetc(ch, fin);
+ break;
+ }
+ if (ch == '\n') {
+ strcpy(word, (char *)"</s>");
+ return;
+ } else continue;
+ }
+ word[a] = ch;
+ a++;
+ if (a >= MAX_STRING - 1) a--; // Truncate words that are too long
+ }
+ word[a] = 0;
+}
+
+// Returns hash value of a word
+int GetWordHash(char *word) {
+ unsigned long long a, hash = 0;
+ for (a = 0; a < strlen(word); a++) hash = hash * 257 + word[a];
+ hash = hash % vocab_hash_size;
+ return hash;
+}
+
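+// The vocabulary hash table uses open addressing with linear probing: on a
+// collision the probe advances to the next slot modulo vocab_hash_size, and
+// an empty slot (-1) proves the word is absent.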
+// Returns position of a word in the vocabulary; if the word is not found, returns -1
+int SearchVocab(char *word) {
+ unsigned int hash = GetWordHash(word);
+ while (1) {
+ if (vocab_hash[hash] == -1) return -1;
+ if (!strcmp(word, vocab[vocab_hash[hash]].word)) return vocab_hash[hash];
+ hash = (hash + 1) % vocab_hash_size;
+ }
+ return -1;
+}
+
+// Reads a word and returns its index in the vocabulary
+int ReadWordIndex(FILE *fin) {
+ char word[MAX_STRING];
+ ReadWord(word, fin);
+ if (feof(fin)) return -1;
+ return SearchVocab(word);
+}
+
+// Adds a word to the vocabulary
+int AddWordToVocab(char *word) {
+ unsigned int hash, length = strlen(word) + 1;
+ if (length > MAX_STRING) length = MAX_STRING;
+ vocab[vocab_size].word = (char *)calloc(length, sizeof(char));
+ strcpy(vocab[vocab_size].word, word);
+ vocab[vocab_size].cn = 0;
+ vocab_size++;
+ // Reallocate memory if needed
+ if (vocab_size + 2 >= vocab_max_size) {
+ vocab_max_size += 1000;
+ vocab = (struct vocab_word *)realloc(vocab, vocab_max_size * sizeof(struct vocab_word));
+ }
+ hash = GetWordHash(word);
+ while (vocab_hash[hash] != -1) hash = (hash + 1) % vocab_hash_size;
+ vocab_hash[hash] = vocab_size - 1;
+ return vocab_size - 1;
+}
+
+// Used later for sorting by word counts
+int VocabCompare(const void *a, const void *b) {
+ return ((struct vocab_word *)b)->cn - ((struct vocab_word *)a)->cn;
+}
+
+// Sorts the vocabulary by frequency using word counts
+void SortVocab() {
+ int a, size;
+ unsigned int hash;
+ // Sort the vocabulary and keep </s> at the first position
+ qsort(&vocab[1], vocab_size - 1, sizeof(struct vocab_word), VocabCompare);
+ for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1;
+ size = vocab_size;
+ train_words = 0;
+ for (a = 0; a < size; a++) {
+ // Words occurring less than min_count times will be discarded from the vocab
+ if ((vocab[a].cn < min_count) && (a != 0)) {
+ vocab_size--;
+ free(vocab[a].word);
+ } else {
+ // Hash will be re-computed, as it is no longer valid after sorting
+ hash=GetWordHash(vocab[a].word);
+ while (vocab_hash[hash] != -1) hash = (hash + 1) % vocab_hash_size;
+ vocab_hash[hash] = a;
+ train_words += vocab[a].cn;
+ }
+ }
+ vocab = (struct vocab_word *)realloc(vocab, (vocab_size + 1) * sizeof(struct vocab_word));
+ // Allocate memory for the binary tree construction
+ for (a = 0; a < vocab_size; a++) {
+ vocab[a].code = (char *)calloc(MAX_CODE_LENGTH, sizeof(char));
+ vocab[a].point = (int *)calloc(MAX_CODE_LENGTH, sizeof(int));
+ }
+}
+
+// Reduces the vocabulary by removing infrequent tokens
+void ReduceVocab() {
+ int a, b = 0;
+ unsigned int hash;
+ for (a = 0; a < vocab_size; a++) if (vocab[a].cn > min_reduce) {
+ vocab[b].cn = vocab[a].cn;
+ vocab[b].word = vocab[a].word;
+ b++;
+ } else free(vocab[a].word);
+ vocab_size = b;
+ for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1;
+ for (a = 0; a < vocab_size; a++) {
+ // Hash will be re-computed, as it is no longer valid
+ hash = GetWordHash(vocab[a].word);
+ while (vocab_hash[hash] != -1) hash = (hash + 1) % vocab_hash_size;
+ vocab_hash[hash] = a;
+ }
+ fflush(stdout);
+ min_reduce++;
+}
+
+// Create binary Huffman tree using the word counts
+// Frequent words will have short unique binary codes
+void CreateBinaryTree() {
+ long long a, b, i, min1i, min2i, pos1, pos2, point[MAX_CODE_LENGTH];
+ char code[MAX_CODE_LENGTH];
+ long long *count = (long long *)calloc(vocab_size * 2 + 1, sizeof(long long));
+ long long *binary = (long long *)calloc(vocab_size * 2 + 1, sizeof(long long));
+ long long *parent_node = (long long *)calloc(vocab_size * 2 + 1, sizeof(long long));
+ for (a = 0; a < vocab_size; a++) count[a] = vocab[a].cn;
+ for (a = vocab_size; a < vocab_size * 2; a++) count[a] = 1e15;
+ pos1 = vocab_size - 1;
+ pos2 = vocab_size;
+ // The following algorithm constructs the Huffman tree by adding one node at a time
+ for (a = 0; a < vocab_size - 1; a++) {
+ // First, find two smallest nodes 'min1, min2'
+ if (pos1 >= 0) {
+ if (count[pos1] < count[pos2]) {
+ min1i = pos1;
+ pos1--;
+ } else {
+ min1i = pos2;
+ pos2++;
+ }
+ } else {
+ min1i = pos2;
+ pos2++;
+ }
+ if (pos1 >= 0) {
+ if (count[pos1] < count[pos2]) {
+ min2i = pos1;
+ pos1--;
+ } else {
+ min2i = pos2;
+ pos2++;
+ }
+ } else {
+ min2i = pos2;
+ pos2++;
+ }
+ count[vocab_size + a] = count[min1i] + count[min2i];
+ parent_node[min1i] = vocab_size + a;
+ parent_node[min2i] = vocab_size + a;
+ binary[min2i] = 1;
+ }
+ // Now assign binary code to each vocabulary word
+ for (a = 0; a < vocab_size; a++) {
+ b = a;
+ i = 0;
+ while (1) {
+ code[i] = binary[b];
+ point[i] = b;
+ i++;
+ b = parent_node[b];
+ if (b == vocab_size * 2 - 2) break;
+ }
+ vocab[a].codelen = i;
+ vocab[a].point[0] = vocab_size - 2;
+ for (b = 0; b < i; b++) {
+ vocab[a].code[i - b - 1] = code[b];
+ vocab[a].point[i - b] = point[b] - vocab_size;
+ }
+ }
+ free(count);
+ free(binary);
+ free(parent_node);
+}
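+// Because vocab is sorted by descending count, the two smallest remaining
+// nodes are always found in O(1): pos1 scans the leaves from the rare end
+// while pos2 scans the internal nodes, which are created with non-decreasing
+// counts. The whole tree is thus built in O(V) after sorting, and each word's
+// code/point arrays store its root-to-leaf path for the hierarchical softmax.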
+
+void LearnVocabFromTrainFile() {
+ char word[MAX_STRING];
+ FILE *fin;
+ long long a, i;
+ for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1;
+ fin = fopen(train_file, "rb");
+ if (fin == NULL) {
+ printf("ERROR: training data file not found!\n");
+ exit(1);
+ }
+ vocab_size = 0;
+ AddWordToVocab((char *)"</s>");
+ int startOfLine = 1;
+ while (1) {
+ ReadWord(word, fin);
+ if (feof(fin)) break;
+ if (startOfLine) {
+ ReadWord(word, fin);
+ startOfLine = 0;
+ }
+ if(isEndOfSentence(word)){
+ startOfLine = 1;
+ }
+ train_words++;
+ if ((debug_mode > 1) && (train_words % 100000 == 0)) {
+ printf("%lldK%c", train_words / 1000, 13);
+ fflush(stdout);
+ }
+ i = SearchVocab(word);
+ if (i == -1) {
+ a = AddWordToVocab(word);
+ vocab[a].cn = 1;
+ } else vocab[i].cn++;
+ if (vocab_size > vocab_hash_size * 0.7) ReduceVocab();
+ }
+ SortVocab();
+ if (debug_mode > 0) {
+ printf("Vocab size: %lld\n", vocab_size);
+ printf("Words in train file: %lld\n", train_words);
+ }
+ file_size = ftell(fin);
+ fclose(fin);
+}
+
+void SaveVocab() {
+ long long i;
+ FILE *fo = fopen(save_vocab_file, "wb");
+ for (i = 0; i < vocab_size; i++) fprintf(fo, "%s %lld\n", vocab[i].word, vocab[i].cn);
+ fclose(fo);
+}
+
+void ReadVocab() {
+ long long a, i = 0;
+ char c;
+ char word[MAX_STRING];
+ FILE *fin = fopen(read_vocab_file, "rb");
+ if (fin == NULL) {
+ printf("Vocabulary file not found\n");
+ exit(1);
+ }
+ for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1;
+ vocab_size = 0;
+ while (1) {
+ ReadWord(word, fin);
+ if (feof(fin)) break;
+ a = AddWordToVocab(word);
+ fscanf(fin, "%lld%c", &vocab[a].cn, &c);
+ i++;
+ }
+ SortVocab();
+ if (debug_mode > 0) {
+ printf("Vocab size: %lld\n", vocab_size);
+ printf("Words in train file: %lld\n", train_words);
+ }
+ fin = fopen(train_file, "rb");
+ if (fin == NULL) {
+ printf("ERROR: training data file not found!\n");
+ exit(1);
+ }
+ fseek(fin, 0, SEEK_END);
+ file_size = ftell(fin);
+ fclose(fin);
+}
+
+void InitClassUnigramTable() {
+ long long a,c;
+ printf("loading class unigrams \n");
+ FILE *fin = fopen(negative_classes_file, "rb");
+ if (fin == NULL) {
+ printf("ERROR: class file not found!\n");
+ exit(1);
+ }
+ word_to_group = (int *)malloc(vocab_size * sizeof(int));
+ for(a = 0; a < vocab_size; a++) word_to_group[a] = -1;
+ char class[MAX_STRING];
+ char prev_class[MAX_STRING];
+ prev_class[0] = 0;
+ char word[MAX_STRING];
+ class_number = -1;
+ while (1) {
+ if (feof(fin)) break;
+ ReadWord(class, fin);
+ ReadWord(word, fin);
+ int word_index = SearchVocab(word);
+ if (word_index != -1){
+ if(strcmp(class, prev_class) != 0){
+ class_number++;
+ strcpy(prev_class, class);
+ }
+ word_to_group[word_index] = class_number;
+ }
+ ReadWord(word, fin);
+ }
+ class_number++;
+ fclose(fin);
+
+ group_to_table = (int *)malloc(table_size * class_number * sizeof(int));
+ long long train_words_pow = 0;
+ real d1, power = 0.75;
+
+ for(c = 0; c < class_number; c++){
+ long long offset = c * table_size;
+ train_words_pow = 0;
+ for (a = 0; a < vocab_size; a++) if(word_to_group[a] == c) train_words_pow += pow(vocab[a].cn, power);
+ int i = 0;
+ while(i < vocab_size && word_to_group[i] != c) i++; // check bounds before dereferencing
+ d1 = pow(vocab[i].cn, power) / (real)train_words_pow;
+ for (a = 0; a < table_size; a++) {
+ //printf("index %lld , word %d\n", a, i);
+ group_to_table[offset + a] = i;
+ if (a / (real)table_size > d1) {
+ i++;
+ while(i < vocab_size && word_to_group[i] != c) i++;
+ d1 += pow(vocab[i].cn, power) / (real)train_words_pow;
+ }
+ if (i >= vocab_size) { i = vocab_size - 1; while(i >= 0 && word_to_group[i] != c) i--; }
+ }
+ }
+}
+
+void InitNet() {
+ long long a, b;
+ unsigned long long next_random = 1;
+ window_layer_size = layer1_size*window*2;
+ a = posix_memalign((void **)&syn0, 128, (long long)vocab_size * layer1_size * sizeof(real));
+ if (syn0 == NULL) {printf("Memory allocation failed\n"); exit(1);}
+
+ if (hs) {
+ a = posix_memalign((void **)&syn1, 128, (long long)vocab_size * layer1_size * sizeof(real));
+ if (syn1 == NULL) {printf("Memory allocation failed\n"); exit(1);}
+ a = posix_memalign((void **)&syn1_window, 128, (long long)vocab_size * window_layer_size * sizeof(real));
+ if (syn1_window == NULL) {printf("Memory allocation failed\n"); exit(1);}
+ a = posix_memalign((void **)&syn_hidden_word, 128, (long long)vocab_size * window_hidden_size * sizeof(real));
+ if (syn_hidden_word == NULL) {printf("Memory allocation failed\n"); exit(1);}
+
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < layer1_size; b++)
+ syn1[a * layer1_size + b] = 0;
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < window_layer_size; b++)
+ syn1_window[a * window_layer_size + b] = 0;
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < window_hidden_size; b++)
+ syn_hidden_word[a * window_hidden_size + b] = 0;
+ }
+ if (negative>0) {
+ a = posix_memalign((void **)&syn1neg, 128, (long long)vocab_size * layer1_size * sizeof(real));
+ if (syn1neg == NULL) {printf("Memory allocation failed\n"); exit(1);}
+ a = posix_memalign((void **)&syn1neg_window, 128, (long long)vocab_size * window_layer_size * sizeof(real));
+ if (syn1neg_window == NULL) {printf("Memory allocation failed\n"); exit(1);}
+ a = posix_memalign((void **)&syn_hidden_word_neg, 128, (long long)vocab_size * window_hidden_size * sizeof(real));
+ if (syn_hidden_word_neg == NULL) {printf("Memory allocation failed\n"); exit(1);}
+
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < layer1_size; b++)
+ syn1neg[a * layer1_size + b] = 0;
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < window_layer_size; b++)
+ syn1neg_window[a * window_layer_size + b] = 0;
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < window_hidden_size; b++)
+ syn_hidden_word_neg[a * window_hidden_size + b] = 0;
+ }
+ if (nce>0) {
+ a = posix_memalign((void **)&syn1nce, 128, (long long)vocab_size * layer1_size * sizeof(real));
+ if (syn1nce == NULL) {printf("Memory allocation failed\n"); exit(1);}
+ a = posix_memalign((void **)&syn1nce_window, 128, (long long)vocab_size * window_layer_size * sizeof(real));
+ if (syn1nce_window == NULL) {printf("Memory allocation failed\n"); exit(1);}
+ a = posix_memalign((void **)&syn_hidden_word_nce, 128, (long long)vocab_size * window_hidden_size * sizeof(real));
+ if (syn_hidden_word_nce == NULL) {printf("Memory allocation failed\n"); exit(1);}
+
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < layer1_size; b++)
+ syn1nce[a * layer1_size + b] = 0;
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < window_layer_size; b++)
+ syn1nce_window[a * window_layer_size + b] = 0;
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < window_hidden_size; b++)
+ syn_hidden_word_nce[a * window_hidden_size + b] = 0;
+ }
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < layer1_size; b++) {
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ syn0[a * layer1_size + b] = (((next_random & 0xFFFF) / (real)65536) - 0.5) / layer1_size;
+ }
+
+ a = posix_memalign((void **)&syn_window_hidden, 128, window_hidden_size * window_layer_size * sizeof(real));
+ if (syn_window_hidden == NULL) {printf("Memory allocation failed\n"); exit(1);}
+ for (a = 0; a < window_hidden_size * window_layer_size; a++){
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ syn_window_hidden[a] = (((next_random & 0xFFFF) / (real)65536) - 0.5) / (window_hidden_size*window_layer_size);
+ }
+
+ CreateBinaryTree();
+}
+
+long long findStartOfLine(char* file, long long start){
+ char word[MAX_STRING];
+ if(start == 0) return 0;
+ while(start != 0){
+ FILE*fi = fopen(file, "rb");
+ fseek(fi, start, SEEK_SET);
+ ReadWord(word, fi);
+ if(isEndOfSentence(word)){
+ fclose(fi);
+ return start+1;
+ }
+ fclose(fi);
+ start--;
+ }
+ return 0;
+}
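+// Threads must start at a line boundary because every line in the weighted
+// training file begins with a numeric weight token: a thread seeking to an
+// arbitrary byte offset scans backwards until the previous end of sentence
+// and starts just after it. Reopening the file per probed byte is slow but
+// only happens once per thread at startup.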
+
+void *TrainModelThread(void *id) {
+ char word_str[MAX_STRING];
+ long long a, b, d, cw, word, last_word, sentence_length = 0, sentence_position = 0;
+ long long word_count = 0, last_word_count = 0, sen[MAX_SENTENCE_LENGTH + 1];
+ long long l1, l2, c, target, label, local_iter = iter;
+ unsigned long long next_random = (long long)id;
+ real f, g;
+ clock_t now;
+ int input_len_1 = layer1_size;
+ int window_offset = -1;
+ float currentWeight = 0;
+ if(type == 2 || type == 4){
+ input_len_1=window_layer_size;
+ }
+ real *neu1 = (real *)calloc(input_len_1, sizeof(real));
+ real *neu1e = (real *)calloc(input_len_1, sizeof(real));
+
+ int input_len_2 = 0;
+ if(type == 4){
+ input_len_2 = window_hidden_size;
+ }
+ real *neu2 = (real *)calloc(input_len_2, sizeof(real));
+ real *neu2e = (real *)calloc(input_len_2, sizeof(real));
+
+ long long start_pos = findStartOfLine(train_file, file_size / (long long)num_threads * (long long)id);
+ FILE *fi = fopen(train_file, "rb");
+ fseek(fi, start_pos, SEEK_SET);
+ int startOfSentence = 1;
+ int startEndOfLineIndex = SearchVocab("</s>");
+ while (1) {
+ if (word_count - last_word_count > 10000) {
+ word_count_actual += word_count - last_word_count;
+ last_word_count = word_count;
+ if ((debug_mode > 1)) {
+ now=clock();
+ printf("%cAlpha: %f Weight: %f Progress: %.2f%% Words/thread/sec: %.2fk ", 13, alpha, currentWeight,
+ word_count_actual / (real)(iter * train_words + 1) * 100,
+ word_count_actual / ((real)(now - start + 1) / (real)CLOCKS_PER_SEC * 1000));
+ fflush(stdout);
+ }
+ alpha = starting_alpha * (1 - word_count_actual / (real)(iter * train_words + 1));
+ if (alpha < starting_alpha * 0.0001) alpha = starting_alpha * 0.0001;
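+ // Linear learning-rate decay: alpha = starting_alpha * (1 - progress), where
+ // progress is the fraction of iter * train_words processed so far, floored
+ // at 0.01% of starting_alpha so late updates never vanish entirely.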
+ }
+ if (sentence_length == 0) {
+ while (1) {
+ if(startOfSentence){
+ ReadWord(word_str, fi);
+ currentWeight = atof(word_str);
+ startOfSentence = 0;
+ continue;
+ }
+ word = ReadWordIndex(fi);
+ if (word == startEndOfLineIndex){
+ startOfSentence = 1;
+ }
+ if (feof(fi)) break;
+ if (word == -1) continue;
+ word_count++;
+ if (word == 0) break;
+ // The subsampling randomly discards frequent words while keeping the ranking the same
+ if (sample > 0) {
+ real ran = (sqrt(vocab[word].cn / (sample * train_words)) + 1) * (sample * train_words) / vocab[word].cn;
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ if (ran < (next_random & 0xFFFF) / (real)65536) continue;
+ }
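+ // The keep probability above is (sqrt(f/t) + 1) * t/f with f =
+ // cn/train_words and t = sample: very frequent words are mostly discarded,
+ // while words rarer than the threshold are always kept.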
+ sen[sentence_length] = word;
+ sentence_length++;
+ if (sentence_length >= MAX_SENTENCE_LENGTH) break;
+ }
+ sentence_position = 0;
+ }
+ if (feof(fi) || (word_count > train_words / num_threads)) {
+ word_count_actual += word_count - last_word_count;
+ local_iter--;
+ if (local_iter == 0) break;
+ word_count = 0;
+ last_word_count = 0;
+ sentence_length = 0;
+ fseek(fi, start_pos, SEEK_SET);
+ continue;
+ }
+ word = sen[sentence_position];
+ if (word == -1) continue;
+ for (c = 0; c < input_len_1; c++) neu1[c] = 0;
+ for (c = 0; c < input_len_1; c++) neu1e[c] = 0;
+ for (c = 0; c < input_len_2; c++) neu2[c] = 0;
+ for (c = 0; c < input_len_2; c++) neu2e[c] = 0;
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ b = next_random % window;
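+ // b in [0, window) shrinks the context to an effective window - b words per
+ // side for the cbow and skip-gram updates (types 0 and 1), so nearer words
+ // are used more often; the cwindow and structured skip-gram architectures
+ // (types 2 and 3) always use the full window.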
+ if (type == 0) { //train the cbow architecture
+ // in -> hidden
+ cw = 0;
+ for (a = b; a < window * 2 + 1 - b; a++) if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0) continue;
+ if (c >= sentence_length) continue;
+ last_word = sen[c];
+ if (last_word == -1) continue;
+ for (c = 0; c < layer1_size; c++) neu1[c] += syn0[c + last_word * layer1_size];
+ cw++;
+ }
+ if (cw) {
+ for (c = 0; c < layer1_size; c++) neu1[c] /= cw;
+ if (hs) for (d = 0; d < vocab[word].codelen; d++) {
+ f = 0;
+ l2 = vocab[word].point[d] * layer1_size;
+ // Propagate hidden -> output
+ for (c = 0; c < layer1_size; c++) f += neu1[c] * syn1[c + l2];
+ if (f <= -MAX_EXP) continue;
+ else if (f >= MAX_EXP) continue;
+ else f = expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
+ // 'g' is the gradient multiplied by the learning rate
+ g = (1 - vocab[word].code[d] - f) * alpha * currentWeight;
+ // Propagate errors output -> hidden
+ for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1[c + l2];
+ // Learn weights hidden -> output
+ for (c = 0; c < layer1_size; c++) syn1[c + l2] += g * neu1[c];
+ if(cap == 1) for (c = 0; c < layer1_size; c++) capParam(syn1, c + l2);
+ }
+ // NEGATIVE SAMPLING
+ if (negative > 0) for (d = 0; d < negative + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ if(word_to_group != NULL && word_to_group[word] != -1){
+ target = word;
+ while(target == word) {
+ target = group_to_table[word_to_group[word]*table_size + (next_random >> 16) % table_size];
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ }
+ else{
+ target = table[(next_random >> 16) % table_size];
+ }
+ if (target == 0) target = next_random % (vocab_size - 1) + 1;
+ if (target == word) continue;
+ label = 0;
+ }
+ l2 = target * layer1_size;
+ f = 0;
+ for (c = 0; c < layer1_size; c++) f += neu1[c] * syn1neg[c + l2];
+ if (f > MAX_EXP) g = (label - 1) * alpha * currentWeight;
+ else if (f < -MAX_EXP) g = (label - 0) * alpha * currentWeight;
+ else g = (label - expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]) * alpha * currentWeight;
+ for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1neg[c + l2];
+ for (c = 0; c < layer1_size; c++) syn1neg[c + l2] += g * neu1[c];
+ if (cap == 1) for (c = 0; c < layer1_size; c++) capParam(syn1neg, c + l2);
+ }
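+ // The negative-sampling update is the logistic-regression gradient
+ // g = (label - sigmoid(f)) * alpha, here additionally scaled by
+ // currentWeight, the per-sentence weight that distinguishes this tool from
+ // plain word2vec; sigmoid(f) is read from expTable with saturated scores
+ // clipped at +-MAX_EXP.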
+ // Noise Contrastive Estimation
+ if (nce > 0) for (d = 0; d < nce + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ if(word_to_group != NULL && word_to_group[word] != -1){
+ target = word;
+ while(target == word) {
+ target = group_to_table[word_to_group[word]*table_size + (next_random >> 16) % table_size];
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ }
+ }
+ else{
+ target = table[(next_random >> 16) % table_size];
+ }
+ if (target == 0) target = next_random % (vocab_size - 1) + 1;
+ if (target == word) continue;
+ label = 0;
+ }
+ l2 = target * layer1_size;
+ f = 0;
+
+ for (c = 0; c < layer1_size; c++) f += neu1[c] * syn1nce[c + l2];
+ if (f > MAX_EXP) g = (label - 1) * alpha * currentWeight;
+ else if (f < -MAX_EXP) g = (label - 0) * alpha * currentWeight;
+ else {
+ f = exp(f);
+ g = (label - f/(noise_distribution[target]*nce + f)) * alpha * currentWeight;
+ }
+ for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1nce[c + l2];
+ for (c = 0; c < layer1_size; c++) syn1nce[c + l2] += g * neu1[c];
+ if(cap == 1) for (c = 0; c < layer1_size; c++) capParam(syn1nce,c + l2);
+ }
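+ // NCE gradient: with raw score f, k = nce noise samples and the unigram^0.75
+ // noise distribution Pn from InitUnigramTable, the posterior that a sample
+ // came from the data is exp(f) / (exp(f) + k * Pn(target)), and
+ // g = (label - posterior) * alpha * currentWeight.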
+ // hidden -> in
+ for (a = b; a < window * 2 + 1 - b; a++) if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0) continue;
+ if (c >= sentence_length) continue;
+ last_word = sen[c];
+ if (last_word == -1) continue;
+ for (c = 0; c < layer1_size; c++) syn0[c + last_word * layer1_size] += neu1e[c];
+ }
+ }
+ } else if(type==1) { //train skip-gram
+ for (a = b; a < window * 2 + 1 - b; a++) if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0) continue;
+ if (c >= sentence_length) continue;
+ last_word = sen[c];
+ if (last_word == -1) continue;
+ l1 = last_word * layer1_size;
+ for (c = 0; c < layer1_size; c++) neu1e[c] = 0;
+ // HIERARCHICAL SOFTMAX
+ if (hs) for (d = 0; d < vocab[word].codelen; d++) {
+ f = 0;
+ l2 = vocab[word].point[d] * layer1_size;
+ // Propagate hidden -> output
+ for (c = 0; c < layer1_size; c++) f += syn0[c + l1] * syn1[c + l2];
+ if (f <= -MAX_EXP) continue;
+ else if (f >= MAX_EXP) continue;
+ else f = expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
+ // 'g' is the gradient multiplied by the learning rate
+ g = (1 - vocab[word].code[d] - f) * alpha * currentWeight;
+ // Propagate errors output -> hidden
+ for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1[c + l2];
+ // Learn weights hidden -> output
+ for (c = 0; c < layer1_size; c++) syn1[c + l2] += g * syn0[c + l1];
+ if (cap == 1) for (c = 0; c < layer1_size; c++) capParam(syn1, c + l2);
+ }
+ // NEGATIVE SAMPLING
+ if (negative > 0) for (d = 0; d < negative + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ if(word_to_group != NULL && word_to_group[word] != -1){
+ target = word;
+ while(target == word) {
+ target = group_to_table[word_to_group[word]*table_size + (next_random >> 16) % table_size];
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ }
+ else{
+ target = table[(next_random >> 16) % table_size];
+ }
+ if (target == 0) target = next_random % (vocab_size - 1) + 1;
+ if (target == word) continue;
+ label = 0;
+ }
+ l2 = target * layer1_size;
+ f = 0;
+ for (c = 0; c < layer1_size; c++) f += syn0[c + l1] * syn1neg[c + l2];
+ if (f > MAX_EXP) g = (label - 1) * alpha * currentWeight;
+ else if (f < -MAX_EXP) g = (label - 0) * alpha * currentWeight;
+ else g = (label - expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]) * alpha * currentWeight;
+ for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1neg[c + l2];
+ for (c = 0; c < layer1_size; c++) syn1neg[c + l2] += g * syn0[c + l1];
+ if (cap == 1) for (c = 0; c < layer1_size; c++) capParam(syn1neg, c + l2);
+ }
+ //Noise Contrastive Estimation
+ if (nce > 0) for (d = 0; d < nce + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ if(word_to_group != NULL && word_to_group[word] != -1){
+ target = word;
+ while(target == word) {
+ target = group_to_table[word_to_group[word]*table_size + (next_random >> 16) % table_size];
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ }
+ else{
+ target = table[(next_random >> 16) % table_size];
+ }
+ if (target == 0) target = next_random % (vocab_size - 1) + 1;
+ if (target == word) continue;
+ label = 0;
+ }
+ l2 = target * layer1_size;
+ f = 0;
+ for (c = 0; c < layer1_size; c++) f += syn0[c + l1] * syn1nce[c + l2];
+ if (f > MAX_EXP) g = (label - 1) * alpha * currentWeight;
+ else if (f < -MAX_EXP) g = (label - 0) * alpha * currentWeight;
+ else {
+ f = exp(f);
+ g = (label - f/(noise_distribution[target]*nce + f)) * alpha * currentWeight;
+ }
+ for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1nce[c + l2];
+ for (c = 0; c < layer1_size; c++) syn1nce[c + l2] += g * syn0[c + l1];
+ if (cap == 1) for (c = 0; c < layer1_size; c++) capParam(syn1nce, c + l2);
+ }
+ // Learn weights input -> hidden
+ for (c = 0; c < layer1_size; c++) syn0[c + l1] += neu1e[c];
+ }
+ }
+ else if(type == 2){ //train the cwindow architecture
+ // in -> hidden
+ cw = 0;
+ for (a = 0; a < window * 2 + 1; a++) if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0) continue;
+ if (c >= sentence_length) continue;
+ last_word = sen[c];
+ if (last_word == -1) continue;
+ window_offset = a*layer1_size;
+ if (a > window) window_offset-=layer1_size;
+ for (c = 0; c < layer1_size; c++) neu1[c+window_offset] += syn0[c + last_word * layer1_size];
+ cw++;
+ }
+ if (cw) {
+ if (hs) for (d = 0; d < vocab[word].codelen; d++) {
+ f = 0;
+ l2 = vocab[word].point[d] * window_layer_size;
+ // Propagate hidden -> output
+ for (c = 0; c < window_layer_size; c++) f += neu1[c] * syn1_window[c + l2];
+ if (f <= -MAX_EXP) continue;
+ else if (f >= MAX_EXP) continue;
+ else f = expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
+ // 'g' is the gradient multiplied by the learning rate
+ g = (1 - vocab[word].code[d] - f) * alpha * currentWeight;
+ // Propagate errors output -> hidden
+ for (c = 0; c < window_layer_size; c++) neu1e[c] += g * syn1_window[c + l2];
+ // Learn weights hidden -> output
+ for (c = 0; c < window_layer_size; c++) syn1_window[c + l2] += g * neu1[c];
+ if (cap == 1) for (c = 0; c < window_layer_size; c++) capParam(syn1_window, c + l2);
+ }
+ // NEGATIVE SAMPLING
+ if (negative > 0) for (d = 0; d < negative + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ if(word_to_group != NULL && word_to_group[word] != -1){
+ target = word;
+ while(target == word) {
+ target = group_to_table[word_to_group[word]*table_size + (next_random >> 16) % table_size];
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ }
+ else{
+ target = table[(next_random >> 16) % table_size];
+ }
+ if (target == 0) target = next_random % (vocab_size - 1) + 1;
+ if (target == word) continue;
+ label = 0;
+ }
+ l2 = target * window_layer_size;
+ f = 0;
+ for (c = 0; c < window_layer_size; c++) f += neu1[c] * syn1neg_window[c + l2];
+ if (f > MAX_EXP) g = (label - 1) * alpha * currentWeight;
+ else if (f < -MAX_EXP) g = (label - 0) * alpha * currentWeight;
+ else g = (label - expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]) * alpha * currentWeight;
+ for (c = 0; c < window_layer_size; c++) neu1e[c] += g * syn1neg_window[c + l2];
+ for (c = 0; c < window_layer_size; c++) syn1neg_window[c + l2] += g * neu1[c];
+ if(cap == 1) for (c = 0; c < window_layer_size; c++) capParam(syn1neg_window, c + l2);
+ }
+ // Noise Contrastive Estimation
+ if (nce > 0) for (d = 0; d < nce + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ if(word_to_group != NULL && word_to_group[word] != -1){
+ target = word;
+ while(target == word) {
+ target = group_to_table[word_to_group[word]*table_size + (next_random >> 16) % table_size];
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ }
+ else{
+ target = table[(next_random >> 16) % table_size];
+ }
+ if (target == 0) target = next_random % (vocab_size - 1) + 1;
+ if (target == word) continue;
+ label = 0;
+ }
+ l2 = target * window_layer_size;
+ f = 0;
+ for (c = 0; c < window_layer_size; c++) f += neu1[c] * syn1nce_window[c + l2];
+ if (f > MAX_EXP) g = (label - 1) * alpha * currentWeight;
+ else if (f < -MAX_EXP) g = (label - 0) * alpha * currentWeight;
+ else {
+ f = exp(f);
+ g = (label - f/(noise_distribution[target]*nce + f)) * alpha * currentWeight;
+ }
+ for (c = 0; c < window_layer_size; c++) neu1e[c] += g * syn1nce_window[c + l2];
+ for (c = 0; c < window_layer_size; c++) syn1nce_window[c + l2] += g * neu1[c];
+ if(cap == 1) for (c = 0; c < window_layer_size; c++) capParam(syn1nce_window, c + l2);
+ }
+ // hidden -> in
+ for (a = 0; a < window * 2 + 1; a++) if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0) continue;
+ if (c >= sentence_length) continue;
+ last_word = sen[c];
+ if (last_word == -1) continue;
+ window_offset = a * layer1_size;
+ if(a > window) window_offset -= layer1_size;
+ for (c = 0; c < layer1_size; c++) syn0[c + last_word * layer1_size] += neu1e[c + window_offset];
+ }
+ }
+ }
+ else if (type == 3){ //train structured skip-gram
+ for (a = 0; a < window * 2 + 1; a++) if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0) continue;
+ if (c >= sentence_length) continue;
+ last_word = sen[c];
+ if (last_word == -1) continue;
+ l1 = last_word * layer1_size;
+ window_offset = a * layer1_size;
+ if(a > window) window_offset -= layer1_size;
+ for (c = 0; c < layer1_size; c++) neu1e[c] = 0;
+ // HIERARCHICAL SOFTMAX
+ if (hs) for (d = 0; d < vocab[word].codelen; d++) {
+ f = 0;
+ l2 = vocab[word].point[d] * window_layer_size;
+ // Propagate hidden -> output
+ for (c = 0; c < layer1_size; c++) f += syn0[c + l1] * syn1_window[c + l2 + window_offset];
+ if (f <= -MAX_EXP) continue;
+ else if (f >= MAX_EXP) continue;
+ else f = expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
+ // 'g' is the gradient multiplied by the learning rate
+ g = (1 - vocab[word].code[d] - f) * alpha * currentWeight;
+ // Propagate errors output -> hidden
+ for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1_window[c + l2 + window_offset];
+ // Learn weights hidden -> output
+ for (c = 0; c < layer1_size; c++) syn1_window[c + l2 + window_offset] += g * syn0[c + l1];
+ if(cap == 1) for (c = 0; c < layer1_size; c++) capParam(syn1_window, c + l2 + window_offset);
+ }
+ // NEGATIVE SAMPLING
+ if (negative > 0) for (d = 0; d < negative + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ if(word_to_group != NULL && word_to_group[word] != -1){
+ target = word;
+ while(target == word) {
+ target = group_to_table[word_to_group[word]*table_size + (next_random >> 16) % table_size];
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ }
+ else{
+ target = table[(next_random >> 16) % table_size];
+ }
+ if (target == 0) target = next_random % (vocab_size - 1) + 1;
+ if (target == word) continue;
+ label = 0;
+ }
+ l2 = target * window_layer_size;
+ f = 0;
+ for (c = 0; c < layer1_size; c++) f += syn0[c + l1] * syn1neg_window[c + l2 + window_offset];
+ if (f > MAX_EXP) g = (label - 1) * alpha * currentWeight;
+ else if (f < -MAX_EXP) g = (label - 0) * alpha * currentWeight;
+ else g = (label - expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]) * alpha * currentWeight;
+ for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1neg_window[c + l2 + window_offset];
+ for (c = 0; c < layer1_size; c++) syn1neg_window[c + l2 + window_offset] += g * syn0[c + l1];
+ if(cap == 1) for (c = 0; c < layer1_size; c++) capParam(syn1neg_window, c + l2 + window_offset);
+ }
+ // Noise Contrastive Estimation
+ if (nce > 0) for (d = 0; d < nce + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ if(word_to_group != NULL && word_to_group[word] != -1){
+ target = word;
+ while(target == word) {
+ target = group_to_table[word_to_group[word]*table_size + (next_random >> 16) % table_size];
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ }
+ else{
+ target = table[(next_random >> 16) % table_size];
+ }
+ if (target == 0) target = next_random % (vocab_size - 1) + 1;
+ if (target == word) continue;
+ label = 0;
+ }
+ l2 = target * window_layer_size;
+ f = 0;
+ for (c = 0; c < layer1_size; c++) f += syn0[c + l1] * syn1nce_window[c + l2 + window_offset];
+ if (f > MAX_EXP) g = (label - 1) * alpha * currentWeight;
+ else if (f < -MAX_EXP) g = (label - 0) * alpha * currentWeight;
+ else {
+ f = exp(f);
+ g = (label - f/(noise_distribution[target]*nce + f)) * alpha * currentWeight;
+ }
+ for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1nce_window[c + l2 + window_offset];
+ for (c = 0; c < layer1_size; c++) syn1nce_window[c + l2 + window_offset] += g * syn0[c + l1];
+ if (cap == 1) for (c = 0; c < layer1_size; c++) capParam(syn1nce_window, c + l2 + window_offset);
+ }
+ // Learn weights input -> hidden
+ for (c = 0; c < layer1_size; c++) {syn0[c + l1] += neu1e[c]; capParam(syn0, c + l1);}
+ }
+ }
+ else if(type == 4){ //training senna
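+ // Senna-style model: the context embeddings are concatenated into neu1, passed
+ // through a dense hidden layer (syn_window_hidden) with a hardTanh nonlinearity,
+ // and the hidden activations then score the output word.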
+ // in -> hidden
+ cw = 0;
+ for (a = 0; a < window * 2 + 1; a++) if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0) continue;
+ if (c >= sentence_length) continue;
+ last_word = sen[c];
+ if (last_word == -1) continue;
+ window_offset = a*layer1_size;
+ if (a > window) window_offset-=layer1_size;
+ for (c = 0; c < layer1_size; c++) neu1[c+window_offset] += syn0[c + last_word * layer1_size];
+ cw++;
+ }
+ if (cw) {
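+ // Forward pass through the hidden layer: neu2 = syn_window_hidden * neu1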
+ for (a = 0; a < window_hidden_size; a++){
+ c = a*window_layer_size;
+ for(b = 0; b < window_layer_size; b++){
+ neu2[a] += syn_window_hidden[c + b] * neu1[b];
+ }
+ }
+ if (hs) for (d = 0; d < vocab[word].codelen; d++) {
+ f = 0;
+ l2 = vocab[word].point[d] * window_hidden_size;
+ // Propagate hidden -> output
+ for (c = 0; c < window_hidden_size; c++) f += hardTanh(neu2[c]) * syn_hidden_word[c + l2];
+ if (f <= -MAX_EXP) continue;
+ else if (f >= MAX_EXP) continue;
+ else f = expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
+ // 'g' is the gradient multiplied by the learning rate
+ g = (1 - vocab[word].code[d] - f) * alpha * currentWeight;
+ // Propagate errors output -> hidden
+ for (c = 0; c < window_hidden_size; c++) neu2e[c] += dHardTanh(neu2[c],g) * g * syn_hidden_word[c + l2];
+ // Learn weights hidden -> output
+ for (c = 0; c < window_hidden_size; c++) syn_hidden_word[c + l2] += dHardTanh(neu2[c],g) * g * neu2[c];
+ }
+ // NEGATIVE SAMPLING
+ if (negative > 0) for (d = 0; d < negative + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ if(word_to_group != NULL && word_to_group[word] != -1){
+ target = word;
+ while(target == word) {
+ target = group_to_table[word_to_group[word]*table_size + (next_random >> 16) % table_size];
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ }
+ else{
+ target = table[(next_random >> 16) % table_size];
+ }
+ if (target == 0) target = next_random % (vocab_size - 1) + 1;
+ if (target == word) continue;
+ label = 0;
+ }
+ l2 = target * window_hidden_size;
+ f = 0;
+ for (c = 0; c < window_hidden_size; c++) f += hardTanh(neu2[c]) * syn_hidden_word_neg[c + l2];
+ if (f > MAX_EXP) g = (label - 1) * alpha * currentWeight / negative;
+ else if (f < -MAX_EXP) g = (label - 0) * alpha * currentWeight / negative;
+ else g = (label - expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]) * alpha * currentWeight / negative;
+ for (c = 0; c < window_hidden_size; c++) neu2e[c] += dHardTanh(neu2[c],g) * g * syn_hidden_word_neg[c + l2];
+ for (c = 0; c < window_hidden_size; c++) syn_hidden_word_neg[c + l2] += dHardTanh(neu2[c],g) * g * neu2[c];
+ }
+ for (a = 0; a < window_hidden_size; a++)
+ for(b = 0; b < window_layer_size; b++)
+ neu1e[b] += neu2e[a] * syn_window_hidden[a*window_layer_size + b];
+ for (a = 0; a < window_hidden_size; a++)
+ for(b = 0; b < window_layer_size; b++)
+ syn_window_hidden[a*window_layer_size + b] += neu2e[a] * neu1[b];
+ // hidden -> in
+ for (a = 0; a < window * 2 + 1; a++) if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0) continue;
+ if (c >= sentence_length) continue;
+ last_word = sen[c];
+ if (last_word == -1) continue;
+ window_offset = a * layer1_size;
+ if(a > window) window_offset -= layer1_size;
+ for (c = 0; c < layer1_size; c++) syn0[c + last_word * layer1_size] += neu1e[c + window_offset];
+ }
+ }
+ }
+ else{
+ printf("unknown type %i", type);
+ exit(0);
+ }
+ sentence_position++;
+ if (sentence_position >= sentence_length) {
+ sentence_length = 0;
+ continue;
+ }
+ }
+ fclose(fi);
+ free(neu1);
+ free(neu1e);
+ free(neu2);
+ free(neu2e);
+ pthread_exit(NULL);
+}
+
+void TrainModel() {
+ long a, b, c, d;
+ FILE *fo;
+ pthread_t *pt = (pthread_t *)malloc(num_threads * sizeof(pthread_t));
+ printf("Starting training using file %s\n", train_file);
+ starting_alpha = alpha;
+ if (read_vocab_file[0] != 0) ReadVocab(); else LearnVocabFromTrainFile();
+ if (save_vocab_file[0] != 0) SaveVocab();
+ if (output_file[0] == 0) return;
+ InitNet();
+ if (negative > 0 || nce > 0) InitUnigramTable();
+ if (negative_classes_file[0] != 0) InitClassUnigramTable();
+ start = clock();
+ for (a = 0; a < num_threads; a++) pthread_create(&pt[a], NULL, TrainModelThread, (void *)a);
+ for (a = 0; a < num_threads; a++) pthread_join(pt[a], NULL);
+ fo = fopen(output_file, "wb");
+ if (classes == 0) {
+ // Save the word vectors
+ fprintf(fo, "%lld %lld\n", vocab_size, layer1_size);
+ for (a = 0; a < vocab_size; a++) {
+ fprintf(fo, "%s ", vocab[a].word);
+ if (binary) for (b = 0; b < layer1_size; b++) fwrite(&syn0[a * layer1_size + b], sizeof(real), 1, fo);
+ else for (b = 0; b < layer1_size; b++) fprintf(fo, "%lf ", syn0[a * layer1_size + b]);
+ fprintf(fo, "\n");
+ }
+ } else {
+ // Run K-means on the word vectors
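+ // Standard K-means with cosine-style assignment: centroids are recomputed from
+ // their members and length-normalized, then each word moves to the centroid
+ // with the largest dot product.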
+ int clcn = classes, iter = 10, closeid;
+ int *centcn = (int *)malloc(classes * sizeof(int));
+ int *cl = (int *)calloc(vocab_size, sizeof(int));
+ real closev, x;
+ real *cent = (real *)calloc(classes * layer1_size, sizeof(real));
+ for (a = 0; a < vocab_size; a++) cl[a] = a % clcn;
+ for (a = 0; a < iter; a++) {
+ for (b = 0; b < clcn * layer1_size; b++) cent[b] = 0;
+ for (b = 0; b < clcn; b++) centcn[b] = 1;
+ for (c = 0; c < vocab_size; c++) {
+ for (d = 0; d < layer1_size; d++) cent[layer1_size * cl[c] + d] += syn0[c * layer1_size + d];
+ centcn[cl[c]]++;
+ }
+ for (b = 0; b < clcn; b++) {
+ closev = 0;
+ for (c = 0; c < layer1_size; c++) {
+ cent[layer1_size * b + c] /= centcn[b];
+ closev += cent[layer1_size * b + c] * cent[layer1_size * b + c];
+ }
+ closev = sqrt(closev);
+ for (c = 0; c < layer1_size; c++) cent[layer1_size * b + c] /= closev;
+ }
+ for (c = 0; c < vocab_size; c++) {
+ closev = -10;
+ closeid = 0;
+ for (d = 0; d < clcn; d++) {
+ x = 0;
+ for (b = 0; b < layer1_size; b++) x += cent[layer1_size * d + b] * syn0[c * layer1_size + b];
+ if (x > closev) {
+ closev = x;
+ closeid = d;
+ }
+ }
+ cl[c] = closeid;
+ }
+ }
+ // Save the K-means classes
+ for (a = 0; a < vocab_size; a++) fprintf(fo, "%s %d\n", vocab[a].word, cl[a]);
+ free(centcn);
+ free(cent);
+ free(cl);
+ }
+ fclose(fo);
+}
+
+int ArgPos(char *str, int argc, char **argv) {
+ int a;
+ for (a = 1; a < argc; a++) if (!strcmp(str, argv[a])) {
+ if (a == argc - 1) {
+ printf("Argument missing for %s\n", str);
+ exit(1);
+ }
+ return a;
+ }
+ return -1;
+}
+
+int main(int argc, char **argv) {
+ int i;
+ if (argc == 1) {
+ printf("WORD VECTOR estimation toolkit v 0.1c\n\n");
+ printf("Options:\n");
+ printf("Parameters for training:\n");
+ printf("\t-train <file>\n");
+ printf("\t\tUse text data from <file> to train the model\n");
+ printf("\t-output <file>\n");
+ printf("\t\tUse <file> to save the resulting word vectors / word clusters\n");
+ printf("\t-size <int>\n");
+ printf("\t\tSet size of word vectors; default is 100\n");
+ printf("\t-window <int>\n");
+ printf("\t\tSet max skip length between words; default is 5\n");
+ printf("\t-sample <float>\n");
+ printf("\t\tSet threshold for occurrence of words. Those that appear with higher frequency in the training data\n");
+ printf("\t\twill be randomly down-sampled; default is 1e-3, useful range is (0, 1e-5)\n");
+ printf("\t-hs <int>\n");
+ printf("\t\tUse Hierarchical Softmax; default is 0 (not used)\n");
+ printf("\t-negative <int>\n");
+ printf("\t\tNumber of negative examples; default is 5, common values are 3 - 10 (0 = not used)\n");
+ printf("\t-negative-classes <file>\n");
+ printf("\t\tNegative classes to sample from\n");
+ printf("\t-nce <int>\n");
+ printf("\t\tNumber of negative examples for nce; default is 0, common values are 3 - 10 (0 = not used)\n");
+ printf("\t-threads <int>\n");
+ printf("\t\tUse <int> threads (default 12)\n");
+ printf("\t-iter <int>\n");
+ printf("\t\tRun more training iterations (default 5)\n");
+ printf("\t-min-count <int>\n");
+ printf("\t\tThis will discard words that appear less than <int> times; default is 5\n");
+ printf("\t-alpha <float>\n");
+ printf("\t\tSet the starting learning rate; default is 0.025 for skip-gram and 0.05 for CBOW\n");
+ printf("\t-classes <int>\n");
+ printf("\t\tOutput word classes rather than word vectors; default number of classes is 0 (vectors are written)\n");
+ printf("\t-debug <int>\n");
+ printf("\t\tSet the debug mode (default = 2 = more info during training)\n");
+ printf("\t-binary <int>\n");
+ printf("\t\tSave the resulting vectors in binary moded; default is 0 (off)\n");
+ printf("\t-save-vocab <file>\n");
+ printf("\t\tThe vocabulary will be saved to <file>\n");
+ printf("\t-read-vocab <file>\n");
+ printf("\t\tThe vocabulary will be read from <file>, not constructed from the training data\n");
+ printf("\t-type <int>\n");
+ printf("\t\tType of embeddings (0 for cbow, 1 for skipngram, 2 for cwindow, 3 for structured skipngram, 4 for senna type)\n");
+ printf("\t-cap <int>\n");
+ printf("\t\tlimit the parameter values to the range [-50, 50]; default is 0 (off)\n");
+ printf("\nExamples:\n");
+ printf("./word2vec -train data.txt -output vec.txt -size 200 -window 5 -sample 1e-4 -negative 5 -hs 0 -binary 0 -type 1 -iter 3\n\n");
+ return 0;
+ }
+ output_file[0] = 0;
+ save_vocab_file[0] = 0;
+ read_vocab_file[0] = 0;
+ negative_classes_file[0] = 0;
+ if ((i = ArgPos((char *)"-size", argc, argv)) > 0) layer1_size = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-train", argc, argv)) > 0) strcpy(train_file, argv[i + 1]);
+ if ((i = ArgPos((char *)"-save-vocab", argc, argv)) > 0) strcpy(save_vocab_file, argv[i + 1]);
+ if ((i = ArgPos((char *)"-read-vocab", argc, argv)) > 0) strcpy(read_vocab_file, argv[i + 1]);
+ if ((i = ArgPos((char *)"-debug", argc, argv)) > 0) debug_mode = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-binary", argc, argv)) > 0) binary = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-type", argc, argv)) > 0) type = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-output", argc, argv)) > 0) strcpy(output_file, argv[i + 1]);
+ if ((i = ArgPos((char *)"-window", argc, argv)) > 0) window = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-sample", argc, argv)) > 0) sample = atof(argv[i + 1]);
+ if ((i = ArgPos((char *)"-hs", argc, argv)) > 0) hs = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-negative", argc, argv)) > 0) negative = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-negative-classes", argc, argv)) > 0) strcpy(negative_classes_file, argv[i + 1]);
+ if ((i = ArgPos((char *)"-nce", argc, argv)) > 0) nce = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-threads", argc, argv)) > 0) num_threads = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-iter", argc, argv)) > 0) iter = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-min-count", argc, argv)) > 0) min_count = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-classes", argc, argv)) > 0) classes = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-cap", argc, argv)) > 0) cap = atoi(argv[i + 1]);
+ if (type==0 || type==2 || type==4) alpha = 0.05;
+ if ((i = ArgPos((char *)"-alpha", argc, argv)) > 0) alpha = atof(argv[i + 1]);
+ vocab = (struct vocab_word *)calloc(vocab_max_size, sizeof(struct vocab_word));
+ vocab_hash = (int *)calloc(vocab_hash_size, sizeof(int));
+ expTable = (real *)malloc((EXP_TABLE_SIZE + 1) * sizeof(real));
+ for (i = 0; i < EXP_TABLE_SIZE; i++) {
+ expTable[i] = exp((i / (real)EXP_TABLE_SIZE * 2 - 1) * MAX_EXP); // Precompute the exp() table
+ expTable[i] = expTable[i] / (expTable[i] + 1); // Precompute f(x) = x / (x + 1)
+ }
+ TrainModel();
+ return 0;
+}
+
diff --git a/word-analogy.c b/word-analogy.c
new file mode 100644
index 0000000..bc78ba7
--- /dev/null
+++ b/word-analogy.c
@@ -0,0 +1,143 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdio.h>
+#include <string.h>
+#include <math.h>
+#include <stdlib.h>
+
+const long long max_size = 2000; // max length of strings
+const long long N = 40; // number of closest words that will be shown
+const long long max_w = 50; // max length of vocabulary entries
+
+int main(int argc, char **argv) {
+ FILE *f;
+ char st1[max_size];
+ char bestw[N][max_size];
+ char file_name[max_size], st[100][max_size];
+ float dist, len, bestd[N], vec[max_size];
+ long long words, size, a, b, c, d, cn, bi[100];
+ float *M;
+ char *vocab;
+ if (argc < 2) {
+ printf("Usage: ./word-analogy <FILE>\nwhere FILE contains word projections in the BINARY FORMAT\n");
+ return 0;
+ }
+ strcpy(file_name, argv[1]);
+ f = fopen(file_name, "rb");
+ if (f == NULL) {
+ printf("Input file not found\n");
+ return -1;
+ }
+ fscanf(f, "%lld", &words);
+ fscanf(f, "%lld", &size);
+ vocab = (char *)malloc((long long)words * max_w * sizeof(char));
+ M = (float *)malloc((long long)words * (long long)size * sizeof(float));
+ if (M == NULL) {
+ printf("Cannot allocate memory: %lld MB %lld %lld\n", (long long)words * size * sizeof(float) / 1048576, words, size);
+ return -1;
+ }
+ for (b = 0; b < words; b++) {
+ a = 0;
+ while (1) {
+ vocab[b * max_w + a] = fgetc(f);
+ if (feof(f) || (vocab[b * max_w + a] == ' ')) break;
+ if ((a < max_w) && (vocab[b * max_w + a] != '\n')) a++;
+ }
+ vocab[b * max_w + a] = 0;
+ for (a = 0; a < size; a++) fread(&M[a + b * size], sizeof(float), 1, f);
+ len = 0;
+ for (a = 0; a < size; a++) len += M[a + b * size] * M[a + b * size];
+ len = sqrt(len);
+ for (a = 0; a < size; a++) M[a + b * size] /= len;
+ }
+ fclose(f);
+ while (1) {
+ for (a = 0; a < N; a++) bestd[a] = 0;
+ for (a = 0; a < N; a++) bestw[a][0] = 0;
+ printf("Enter three words (EXIT to break): ");
+ a = 0;
+ while (1) {
+ st1[a] = fgetc(stdin);
+ if ((st1[a] == '\n') || (a >= max_size - 1)) {
+ st1[a] = 0;
+ break;
+ }
+ a++;
+ }
+ if (!strcmp(st1, "EXIT")) break;
+ cn = 0;
+ b = 0;
+ c = 0;
+ while (1) {
+ st[cn][b] = st1[c];
+ b++;
+ c++;
+ st[cn][b] = 0;
+ if (st1[c] == 0) break;
+ if (st1[c] == ' ') {
+ cn++;
+ b = 0;
+ c++;
+ }
+ }
+ cn++;
+ if (cn < 3) {
+ printf("Only %lld words were entered.. three words are needed at the input to perform the calculation\n", cn);
+ continue;
+ }
+ for (a = 0; a < cn; a++) {
+ for (b = 0; b < words; b++) if (!strcmp(&vocab[b * max_w], st[a])) break;
+ if (b == words) b = 0;
+ bi[a] = b;
+ printf("\nWord: %s Position in vocabulary: %lld\n", st[a], bi[a]);
+ if (b == 0) {
+ printf("Out of dictionary word!\n");
+ break;
+ }
+ }
+ if (b == 0) continue;
+ printf("\n Word Distance\n------------------------------------------------------------------------\n");
+ for (a = 0; a < size; a++) vec[a] = M[a + bi[1] * size] - M[a + bi[0] * size] + M[a + bi[2] * size];
+ len = 0;
+ for (a = 0; a < size; a++) len += vec[a] * vec[a];
+ len = sqrt(len);
+ for (a = 0; a < size; a++) vec[a] /= len;
+ for (a = 0; a < N; a++) bestd[a] = 0;
+ for (a = 0; a < N; a++) bestw[a][0] = 0;
+ for (c = 0; c < words; c++) {
+ if (c == bi[0]) continue;
+ if (c == bi[1]) continue;
+ if (c == bi[2]) continue;
+ a = 0;
+ for (b = 0; b < cn; b++) if (bi[b] == c) a = 1;
+ if (a == 1) continue;
+ dist = 0;
+ for (a = 0; a < size; a++) dist += vec[a] * M[a + c * size];
+ for (a = 0; a < N; a++) {
+ if (dist > bestd[a]) {
+ for (d = N - 1; d > a; d--) {
+ bestd[d] = bestd[d - 1];
+ strcpy(bestw[d], bestw[d - 1]);
+ }
+ bestd[a] = dist;
+ strcpy(bestw[a], &vocab[c * max_w]);
+ break;
+ }
+ }
+ }
+ for (a = 0; a < N; a++) printf("%50s\t\t%f\n", bestw[a], bestd[a]);
+ }
+ return 0;
+}
diff --git a/word2phrase.c b/word2phrase.c
new file mode 100644
index 0000000..24238bc
--- /dev/null
+++ b/word2phrase.c
@@ -0,0 +1,292 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+#include <pthread.h>
+
+#define MAX_STRING 60
+
+const int vocab_hash_size = 500000000; // Maximum 500M entries in the vocabulary
+
+typedef float real; // Precision of float numbers
+
+struct vocab_word {
+ long long cn;
+ char *word;
+};
+
+char train_file[MAX_STRING], output_file[MAX_STRING];
+struct vocab_word *vocab;
+int debug_mode = 2, min_count = 5, *vocab_hash, min_reduce = 1;
+long long vocab_max_size = 10000, vocab_size = 0;
+long long train_words = 0;
+real threshold = 100;
+
+unsigned long long next_random = 1;
+
+// Reads a single word from a file, assuming space + tab + EOL to be word boundaries
+void ReadWord(char *word, FILE *fin) {
+ int a = 0, ch;
+ while (!feof(fin)) {
+ ch = fgetc(fin);
+ if (ch == 13) continue;
+ if ((ch == ' ') || (ch == '\t') || (ch == '\n')) {
+ if (a > 0) {
+ if (ch == '\n') ungetc(ch, fin);
+ break;
+ }
+ if (ch == '\n') {
+ strcpy(word, (char *)"</s>");
+ return;
+ } else continue;
+ }
+ word[a] = ch;
+ a++;
+ if (a >= MAX_STRING - 1) a--; // Truncate too long words
+ }
+ word[a] = 0;
+}
+
+// Returns hash value of a word
+int GetWordHash(char *word) {
+ unsigned long long a, hash = 1;
+ for (a = 0; a < strlen(word); a++) hash = hash * 257 + word[a];
+ hash = hash % vocab_hash_size;
+ return hash;
+}
+
+// Returns position of a word in the vocabulary; if the word is not found, returns -1
+int SearchVocab(char *word) {
+ unsigned int hash = GetWordHash(word);
+ while (1) {
+ if (vocab_hash[hash] == -1) return -1;
+ if (!strcmp(word, vocab[vocab_hash[hash]].word)) return vocab_hash[hash];
+ hash = (hash + 1) % vocab_hash_size;
+ }
+ return -1;
+}
+
+// Reads a word and returns its index in the vocabulary
+int ReadWordIndex(FILE *fin) {
+ char word[MAX_STRING];
+ ReadWord(word, fin);
+ if (feof(fin)) return -1;
+ return SearchVocab(word);
+}
+
+// Adds a word to the vocabulary
+int AddWordToVocab(char *word) {
+ unsigned int hash, length = strlen(word) + 1;
+ if (length > MAX_STRING) length = MAX_STRING;
+ vocab[vocab_size].word = (char *)calloc(length, sizeof(char));
+ strcpy(vocab[vocab_size].word, word);
+ vocab[vocab_size].cn = 0;
+ vocab_size++;
+ // Reallocate memory if needed
+ if (vocab_size + 2 >= vocab_max_size) {
+ vocab_max_size += 10000;
+ vocab=(struct vocab_word *)realloc(vocab, vocab_max_size * sizeof(struct vocab_word));
+ }
+ hash = GetWordHash(word);
+ while (vocab_hash[hash] != -1) hash = (hash + 1) % vocab_hash_size;
+ vocab_hash[hash]=vocab_size - 1;
+ return vocab_size - 1;
+}
+
+// Used later for sorting by word counts
+int VocabCompare(const void *a, const void *b) {
+ return ((struct vocab_word *)b)->cn - ((struct vocab_word *)a)->cn;
+}
+
+// Sorts the vocabulary by frequency using word counts
+void SortVocab() {
+ int a, size;
+ unsigned int hash;
+ // Sort the vocabulary and keep </s> at the first position
+ qsort(&vocab[1], vocab_size - 1, sizeof(struct vocab_word), VocabCompare);
+ for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1;
+ size = vocab_size;
+ for (a = 0; a < size; a++) {
+ // Words occurring less than min_count times will be discarded from the vocab
+ if ((vocab[a].cn < min_count) && (a != 0)) {
+ vocab_size--;
+ free(vocab[a].word);
+ } else {
+ // Hash must be re-computed, as it is no longer valid after sorting
+ hash = GetWordHash(vocab[a].word);
+ while (vocab_hash[hash] != -1) hash = (hash + 1) % vocab_hash_size;
+ vocab_hash[hash] = a;
+ }
+ }
+ vocab = (struct vocab_word *)realloc(vocab, vocab_size * sizeof(struct vocab_word));
+}
+
+// Reduces the vocabulary by removing infrequent tokens
+void ReduceVocab() {
+ int a, b = 0;
+ unsigned int hash;
+ for (a = 0; a < vocab_size; a++) if (vocab[a].cn > min_reduce) {
+ vocab[b].cn = vocab[a].cn;
+ vocab[b].word = vocab[a].word;
+ b++;
+ } else free(vocab[a].word);
+ vocab_size = b;
+ for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1;
+ for (a = 0; a < vocab_size; a++) {
+ // Hash must be re-computed, as it is no longer valid
+ hash = GetWordHash(vocab[a].word);
+ while (vocab_hash[hash] != -1) hash = (hash + 1) % vocab_hash_size;
+ vocab_hash[hash] = a;
+ }
+ fflush(stdout);
+ min_reduce++;
+}
+
+void LearnVocabFromTrainFile() {
+ char word[MAX_STRING], last_word[MAX_STRING], bigram_word[MAX_STRING * 2];
+ FILE *fin;
+ long long a, i, start = 1;
+ for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1;
+ fin = fopen(train_file, "rb");
+ if (fin == NULL) {
+ printf("ERROR: training data file not found!\n");
+ exit(1);
+ }
+ vocab_size = 0;
+ AddWordToVocab((char *)"</s>");
+ while (1) {
+ ReadWord(word, fin);
+ if (feof(fin)) break;
+ if (!strcmp(word, "</s>")) {
+ start = 1;
+ continue;
+ } else start = 0;
+ train_words++;
+ if ((debug_mode > 1) && (train_words % 100000 == 0)) {
+ printf("Words processed: %lldK Vocab size: %lldK %c", train_words / 1000, vocab_size / 1000, 13);
+ fflush(stdout);
+ }
+ i = SearchVocab(word);
+ if (i == -1) {
+ a = AddWordToVocab(word);
+ vocab[a].cn = 1;
+ } else vocab[i].cn++;
+ if (start) continue;
+ sprintf(bigram_word, "%s_%s", last_word, word);
+ bigram_word[MAX_STRING - 1] = 0;
+ strcpy(last_word, word);
+ i = SearchVocab(bigram_word);
+ if (i == -1) {
+ a = AddWordToVocab(bigram_word);
+ vocab[a].cn = 1;
+ } else vocab[i].cn++;
+ if (vocab_size > vocab_hash_size * 0.7) ReduceVocab();
+ }
+ SortVocab();
+ if (debug_mode > 0) {
+ printf("\nVocab size (unigrams + bigrams): %lld\n", vocab_size);
+ printf("Words in train file: %lld\n", train_words);
+ }
+ fclose(fin);
+}
+
+void TrainModel() {
+ long long pa = 0, pb = 0, pab = 0, oov, i, li = -1, cn = 0;
+ char word[MAX_STRING], last_word[MAX_STRING], bigram_word[MAX_STRING * 2];
+ real score;
+ FILE *fo, *fin;
+ printf("Starting training using file %s\n", train_file);
+ LearnVocabFromTrainFile();
+ fin = fopen(train_file, "rb");
+ fo = fopen(output_file, "wb");
+ word[0] = 0;
+ while (1) {
+ strcpy(last_word, word);
+ ReadWord(word, fin);
+ if (feof(fin)) break;
+ if (!strcmp(word, "</s>")) {
+ fprintf(fo, "\n");
+ continue;
+ }
+ cn++;
+ if ((debug_mode > 1) && (cn % 100000 == 0)) {
+ printf("Words written: %lldK%c", cn / 1000, 13);
+ fflush(stdout);
+ }
+ oov = 0;
+ i = SearchVocab(word);
+ if (i == -1) oov = 1; else pb = vocab[i].cn;
+ if (li == -1) oov = 1;
+ li = i;
+ sprintf(bigram_word, "%s_%s", last_word, word);
+ bigram_word[MAX_STRING - 1] = 0;
+ i = SearchVocab(bigram_word);
+ if (i == -1) oov = 1; else pab = vocab[i].cn;
+ if (pa < min_count) oov = 1;
+ if (pb < min_count) oov = 1;
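+ // Discounted PMI-style association score: the bigram count minus min_count,
+ // normalized by both unigram counts and scaled by the corpus size; bigrams
+ // scoring above the threshold are joined into a single phrase token.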
+ if (oov) score = 0; else score = (pab - min_count) / (real)pa / (real)pb * (real)train_words;
+ if (score > threshold) {
+ fprintf(fo, "_%s", word);
+ pb = 0;
+ } else fprintf(fo, " %s", word);
+ pa = pb;
+ }
+ fclose(fo);
+ fclose(fin);
+}
+
+int ArgPos(char *str, int argc, char **argv) {
+ int a;
+ for (a = 1; a < argc; a++) if (!strcmp(str, argv[a])) {
+ if (a == argc - 1) {
+ printf("Argument missing for %s\n", str);
+ exit(1);
+ }
+ return a;
+ }
+ return -1;
+}
+
+int main(int argc, char **argv) {
+ int i;
+ if (argc == 1) {
+ printf("WORD2PHRASE tool v0.1a\n\n");
+ printf("Options:\n");
+ printf("Parameters for training:\n");
+ printf("\t-train <file>\n");
+ printf("\t\tUse text data from <file> to train the model\n");
+ printf("\t-output <file>\n");
+ printf("\t\tUse <file> to save the resulting word vectors / word clusters / phrases\n");
+ printf("\t-min-count <int>\n");
+ printf("\t\tThis will discard words that appear less than <int> times; default is 5\n");
+ printf("\t-threshold <float>\n");
+ printf("\t\t The <float> value represents threshold for forming the phrases (higher means less phrases); default 100\n");
+ printf("\t-debug <int>\n");
+ printf("\t\tSet the debug mode (default = 2 = more info during training)\n");
+ printf("\nExamples:\n");
+ printf("./word2phrase -train text.txt -output phrases.txt -threshold 100 -debug 2\n\n");
+ return 0;
+ }
+ if ((i = ArgPos((char *)"-train", argc, argv)) > 0) strcpy(train_file, argv[i + 1]);
+ if ((i = ArgPos((char *)"-debug", argc, argv)) > 0) debug_mode = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-output", argc, argv)) > 0) strcpy(output_file, argv[i + 1]);
+ if ((i = ArgPos((char *)"-min-count", argc, argv)) > 0) min_count = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-threshold", argc, argv)) > 0) threshold = atof(argv[i + 1]);
+ vocab = (struct vocab_word *)calloc(vocab_max_size, sizeof(struct vocab_word));
+ vocab_hash = (int *)calloc(vocab_hash_size, sizeof(int));
+ TrainModel();
+ return 0;
+}
diff --git a/word2vec.c b/word2vec.c
new file mode 100644
index 0000000..67d9846
--- /dev/null
+++ b/word2vec.c
@@ -0,0 +1,1274 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+#include <pthread.h>
+
+#define MAX_STRING 100
+#define EXP_TABLE_SIZE 1000
+#define MAX_EXP 6
+#define MAX_SENTENCE_LENGTH 1000
+#define MAX_CODE_LENGTH 40
+
+const int vocab_hash_size = 30000000; // Maximum 30 * 0.7 = 21M words in the vocabulary
+
+typedef float real; // Precision of float numbers
+
+struct vocab_word {
+ long long cn;
+ int *point;
+ char *word, *code, codelen;
+};
+
+char train_file[MAX_STRING], output_file[MAX_STRING];
+char save_vocab_file[MAX_STRING], read_vocab_file[MAX_STRING];
+struct vocab_word *vocab;
+int binary = 0, type = 1, debug_mode = 2, window = 5, min_count = 5, num_threads = 12, min_reduce = 1;
+int *vocab_hash;
+long long vocab_max_size = 1000, vocab_size = 0, layer1_size = 100;
+long long train_words = 0, word_count_actual = 0, iter = 5, file_size = 0, classes = 0;
+real alpha = 0.025, starting_alpha, sample = 1e-3;
+real *syn0, *syn1, *syn1neg, *syn1nce, *expTable;
+clock_t start;
+
+real *syn1_window, *syn1neg_window, *syn1nce_window;
+int w_offset, window_layer_size;
+
+int window_hidden_size = 500;
+real *syn_window_hidden, *syn_hidden_word, *syn_hidden_word_neg, *syn_hidden_word_nce;
+
+int hs = 0, negative = 5;
+const int table_size = 1e8;
+int *table;
+
+//contrastive negative sampling
+char negative_classes_file[MAX_STRING];
+int *word_to_group;
+int *group_to_table; //group_size*table_size
+int class_number;
+
+//nce
+real* noise_distribution;
+int nce = 0;
+
+//param caps
+real CAP_VALUE = 50;
+int cap = 0;
+
+void capParam(real* array, int index){
+ if(array[index] > CAP_VALUE)
+ array[index] = CAP_VALUE;
+ else if(array[index] < -CAP_VALUE)
+ array[index] = -CAP_VALUE;
+}
+
+real hardTanh(real x){
+ if(x>=1){
+ return 1;
+ }
+ else if(x<=-1){
+ return -1;
+ }
+ else{
+ return x;
+ }
+}
+
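+// Derivative gate for hardTanh: returns 0 when the unit is saturated (|x| > 1)
+// and the gradient has the same sign as the saturation, 1 otherwise.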
+real dHardTanh(real x, real g){
+ if(x > 1 && g > 0){
+ return 0;
+ }
+ if(x < -1 && g < 0){
+ return 0;
+ }
+ return 1;
+}
+
+void InitUnigramTable() {
+ int a, i;
+ long long train_words_pow = 0;
+ real d1, power = 0.75;
+ table = (int *)malloc(table_size * sizeof(int));
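+ // Each word gets a share of table slots proportional to count^0.75, so drawing
+ // a uniform slot samples negatives from the smoothed unigram distribution
+ // (the 3/4 power damps very frequent words).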
+ for (a = 0; a < vocab_size; a++) train_words_pow += pow(vocab[a].cn, power);
+ i = 0;
+ d1 = pow(vocab[i].cn, power) / (real)train_words_pow;
+ for (a = 0; a < table_size; a++) {
+ table[a] = i;
+ if (a / (real)table_size > d1) {
+ i++;
+ d1 += pow(vocab[i].cn, power) / (real)train_words_pow;
+ }
+ if (i >= vocab_size) i = vocab_size - 1;
+ }
+
+ noise_distribution = (real *)calloc(vocab_size, sizeof(real));
+ for (a = 0; a < vocab_size; a++) noise_distribution[a] = pow(vocab[a].cn, power)/(real)train_words_pow;
+}
+
+// Reads a single word from a file, assuming space + tab + EOL to be word boundaries
+void ReadWord(char *word, FILE *fin) {
+ int a = 0, ch;
+ while (!feof(fin)) {
+ ch = fgetc(fin);
+ if (ch == 13) continue;
+ if ((ch == ' ') || (ch == '\t') || (ch == '\n')) {
+ if (a > 0) {
+ if (ch == '\n') ungetc(ch, fin);
+ break;
+ }
+ if (ch == '\n') {
+ strcpy(word, (char *)"</s>");
+ return;
+ } else continue;
+ }
+ word[a] = ch;
+ a++;
+ if (a >= MAX_STRING - 1) a--; // Truncate too long words
+ }
+ word[a] = 0;
+}
+
+// Returns hash value of a word
+int GetWordHash(char *word) {
+ unsigned long long a, hash = 0;
+ for (a = 0; a < strlen(word); a++) hash = hash * 257 + word[a];
+ hash = hash % vocab_hash_size;
+ return hash;
+}
+
+// Returns position of a word in the vocabulary; if the word is not found, returns -1
+int SearchVocab(char *word) {
+ unsigned int hash = GetWordHash(word);
+ while (1) {
+ if (vocab_hash[hash] == -1) return -1;
+ if (!strcmp(word, vocab[vocab_hash[hash]].word)) return vocab_hash[hash];
+ hash = (hash + 1) % vocab_hash_size;
+ }
+ return -1;
+}
+
+// Reads a word and returns its index in the vocabulary
+int ReadWordIndex(FILE *fin) {
+ char word[MAX_STRING];
+ ReadWord(word, fin);
+ if (feof(fin)) return -1;
+ return SearchVocab(word);
+}
+
+// Adds a word to the vocabulary
+int AddWordToVocab(char *word) {
+ unsigned int hash, length = strlen(word) + 1;
+ if (length > MAX_STRING) length = MAX_STRING;
+ vocab[vocab_size].word = (char *)calloc(length, sizeof(char));
+ strcpy(vocab[vocab_size].word, word);
+ vocab[vocab_size].cn = 0;
+ vocab_size++;
+ // Reallocate memory if needed
+ if (vocab_size + 2 >= vocab_max_size) {
+ vocab_max_size += 1000;
+ vocab = (struct vocab_word *)realloc(vocab, vocab_max_size * sizeof(struct vocab_word));
+ }
+ hash = GetWordHash(word);
+ while (vocab_hash[hash] != -1) hash = (hash + 1) % vocab_hash_size;
+ vocab_hash[hash] = vocab_size - 1;
+ return vocab_size - 1;
+}
+
+// Used later for sorting by word counts
+int VocabCompare(const void *a, const void *b) {
+ return ((struct vocab_word *)b)->cn - ((struct vocab_word *)a)->cn;
+}
+
+// Sorts the vocabulary by frequency using word counts
+void SortVocab() {
+ int a, size;
+ unsigned int hash;
+ // Sort the vocabulary and keep </s> at the first position
+ qsort(&vocab[1], vocab_size - 1, sizeof(struct vocab_word), VocabCompare);
+ for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1;
+ size = vocab_size;
+ train_words = 0;
+ for (a = 0; a < size; a++) {
+ // Words occurring less than min_count times will be discarded from the vocab
+ if ((vocab[a].cn < min_count) && (a != 0)) {
+ vocab_size--;
+ free(vocab[a].word);
+ } else {
+ // Hash must be re-computed, as it is no longer valid after sorting
+ hash=GetWordHash(vocab[a].word);
+ while (vocab_hash[hash] != -1) hash = (hash + 1) % vocab_hash_size;
+ vocab_hash[hash] = a;
+ train_words += vocab[a].cn;
+ }
+ }
+ vocab = (struct vocab_word *)realloc(vocab, (vocab_size + 1) * sizeof(struct vocab_word));
+ // Allocate memory for the binary tree construction
+ for (a = 0; a < vocab_size; a++) {
+ vocab[a].code = (char *)calloc(MAX_CODE_LENGTH, sizeof(char));
+ vocab[a].point = (int *)calloc(MAX_CODE_LENGTH, sizeof(int));
+ }
+}
+
+// Reduces the vocabulary by removing infrequent tokens
+void ReduceVocab() {
+ int a, b = 0;
+ unsigned int hash;
+ for (a = 0; a < vocab_size; a++) if (vocab[a].cn > min_reduce) {
+ vocab[b].cn = vocab[a].cn;
+ vocab[b].word = vocab[a].word;
+ b++;
+ } else free(vocab[a].word);
+ vocab_size = b;
+ for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1;
+ for (a = 0; a < vocab_size; a++) {
+ // Hash must be re-computed, as it is no longer valid
+ hash = GetWordHash(vocab[a].word);
+ while (vocab_hash[hash] != -1) hash = (hash + 1) % vocab_hash_size;
+ vocab_hash[hash] = a;
+ }
+ fflush(stdout);
+ min_reduce++;
+}
+
+// Create binary Huffman tree using the word counts
+// Frequent words will have short unique binary codes
+void CreateBinaryTree() {
+ long long a, b, i, min1i, min2i, pos1, pos2, point[MAX_CODE_LENGTH];
+ char code[MAX_CODE_LENGTH];
+ long long *count = (long long *)calloc(vocab_size * 2 + 1, sizeof(long long));
+ long long *binary = (long long *)calloc(vocab_size * 2 + 1, sizeof(long long));
+ long long *parent_node = (long long *)calloc(vocab_size * 2 + 1, sizeof(long long));
+ for (a = 0; a < vocab_size; a++) count[a] = vocab[a].cn;
+ for (a = vocab_size; a < vocab_size * 2; a++) count[a] = 1e15;
+ pos1 = vocab_size - 1;
+ pos2 = vocab_size;
+ // Following algorithm constructs the Huffman tree by adding one node at a time
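+ // pos1 scans the leaf nodes (sorted by descending count) and pos2 scans the newly
+ // created internal nodes; the two cheapest available nodes are merged at each step.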
+ for (a = 0; a < vocab_size - 1; a++) {
+ // First, find two smallest nodes 'min1, min2'
+ if (pos1 >= 0) {
+ if (count[pos1] < count[pos2]) {
+ min1i = pos1;
+ pos1--;
+ } else {
+ min1i = pos2;
+ pos2++;
+ }
+ } else {
+ min1i = pos2;
+ pos2++;
+ }
+ if (pos1 >= 0) {
+ if (count[pos1] < count[pos2]) {
+ min2i = pos1;
+ pos1--;
+ } else {
+ min2i = pos2;
+ pos2++;
+ }
+ } else {
+ min2i = pos2;
+ pos2++;
+ }
+ count[vocab_size + a] = count[min1i] + count[min2i];
+ parent_node[min1i] = vocab_size + a;
+ parent_node[min2i] = vocab_size + a;
+ binary[min2i] = 1;
+ }
+ // Now assign binary code to each vocabulary word
+ for (a = 0; a < vocab_size; a++) {
+ b = a;
+ i = 0;
+ while (1) {
+ code[i] = binary[b];
+ point[i] = b;
+ i++;
+ b = parent_node[b];
+ if (b == vocab_size * 2 - 2) break;
+ }
+ vocab[a].codelen = i;
+ vocab[a].point[0] = vocab_size - 2;
+ for (b = 0; b < i; b++) {
+ vocab[a].code[i - b - 1] = code[b];
+ vocab[a].point[i - b] = point[b] - vocab_size;
+ }
+ }
+ free(count);
+ free(binary);
+ free(parent_node);
+}
+
+void LearnVocabFromTrainFile() {
+ char word[MAX_STRING];
+ FILE *fin;
+ long long a, i;
+ for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1;
+ fin = fopen(train_file, "rb");
+ if (fin == NULL) {
+ printf("ERROR: training data file not found!\n");
+ exit(1);
+ }
+ vocab_size = 0;
+ AddWordToVocab((char *)"</s>");
+ while (1) {
+ ReadWord(word, fin);
+ if (feof(fin)) break;
+ train_words++;
+ if ((debug_mode > 1) && (train_words % 100000 == 0)) {
+ printf("%lldK%c", train_words / 1000, 13);
+ fflush(stdout);
+ }
+ i = SearchVocab(word);
+ if (i == -1) {
+ a = AddWordToVocab(word);
+ vocab[a].cn = 1;
+ } else vocab[i].cn++;
+ if (vocab_size > vocab_hash_size * 0.7) ReduceVocab();
+ }
+ SortVocab();
+ if (debug_mode > 0) {
+ printf("Vocab size: %lld\n", vocab_size);
+ printf("Words in train file: %lld\n", train_words);
+ }
+ file_size = ftell(fin);
+ fclose(fin);
+}
+
+void SaveVocab() {
+ long long i;
+ FILE *fo = fopen(save_vocab_file, "wb");
+ for (i = 0; i < vocab_size; i++) fprintf(fo, "%s %lld\n", vocab[i].word, vocab[i].cn);
+ fclose(fo);
+}
+
+void ReadVocab() {
+ long long a, i = 0;
+ char c;
+ char word[MAX_STRING];
+ FILE *fin = fopen(read_vocab_file, "rb");
+ if (fin == NULL) {
+ printf("Vocabulary file not found\n");
+ exit(1);
+ }
+ for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1;
+ vocab_size = 0;
+ while (1) {
+ ReadWord(word, fin);
+ if (feof(fin)) break;
+ a = AddWordToVocab(word);
+ fscanf(fin, "%lld%c", &vocab[a].cn, &c);
+ i++;
+ }
+ SortVocab();
+ if (debug_mode > 0) {
+ printf("Vocab size: %lld\n", vocab_size);
+ printf("Words in train file: %lld\n", train_words);
+ }
+ fin = fopen(train_file, "rb");
+ if (fin == NULL) {
+ printf("ERROR: training data file not found!\n");
+ exit(1);
+ }
+ fseek(fin, 0, SEEK_END);
+ file_size = ftell(fin);
+ fclose(fin);
+}
+
+void InitClassUnigramTable() {
+ long long a,c;
+ printf("loading class unigrams \n");
+ FILE *fin = fopen(negative_classes_file, "rb");
+ if (fin == NULL) {
+ printf("ERROR: class file not found!\n");
+ exit(1);
+ }
+ word_to_group = (int *)malloc(vocab_size * sizeof(int));
+ for(a = 0; a < vocab_size; a++) word_to_group[a] = -1;
+ char class[MAX_STRING];
+ char prev_class[MAX_STRING];
+ prev_class[0] = 0;
+ char word[MAX_STRING];
+ class_number = -1;
+ while (1) {
+ if (feof(fin)) break;
+ ReadWord(class, fin);
+ ReadWord(word, fin);
+ int word_index = SearchVocab(word);
+ if (word_index != -1){
+ if(strcmp(class, prev_class) != 0){
+ class_number++;
+ strcpy(prev_class, class);
+ }
+ word_to_group[word_index] = class_number;
+ }
+ ReadWord(word, fin);
+ }
+ class_number++;
+ fclose(fin);
+
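+ // Build one unigram table per class so that, when -negative-classes is given,
+ // negatives for a word are drawn only from that word's own class.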
+ group_to_table = (int *)malloc(table_size * class_number * sizeof(int));
+ long long train_words_pow = 0;
+ real d1, power = 0.75;
+
+ for(c = 0; c < class_number; c++){
+ long long offset = c * table_size;
+ train_words_pow = 0;
+ for (a = 0; a < vocab_size; a++) if(word_to_group[a] == c) train_words_pow += pow(vocab[a].cn, power);
+ int i = 0;
+ while(i < vocab_size && word_to_group[i] != c) i++;
+ d1 = pow(vocab[i].cn, power) / (real)train_words_pow;
+ for (a = 0; a < table_size; a++) {
+ //printf("index %lld , word %d\n", a, i);
+ group_to_table[offset + a] = i;
+ if (a / (real)table_size > d1) {
+ i++;
+ while(i < vocab_size && word_to_group[i] != c) i++;
+ d1 += pow(vocab[i].cn, power) / (real)train_words_pow;
+ }
+ if (i >= vocab_size) {i = vocab_size - 1; while(i >= 0 && word_to_group[i] != c) i--;}
+ }
+ }
+}
+
+void InitNet() {
+ long long a, b;
+ unsigned long long next_random = 1;
+ window_layer_size = layer1_size*window*2;
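+ // The *_window output matrices hold one layer1_size block per context position
+ // (2 * window positions); they serve the cwindow and structured skip-gram models.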
+ a = posix_memalign((void **)&syn0, 128, (long long)vocab_size * layer1_size * sizeof(real));
+ if (syn0 == NULL) {printf("Memory allocation failed\n"); exit(1);}
+
+ if (hs) {
+ a = posix_memalign((void **)&syn1, 128, (long long)vocab_size * layer1_size * sizeof(real));
+ if (syn1 == NULL) {printf("Memory allocation failed\n"); exit(1);}
+ a = posix_memalign((void **)&syn1_window, 128, (long long)vocab_size * window_layer_size * sizeof(real));
+ if (syn1_window == NULL) {printf("Memory allocation failed\n"); exit(1);}
+ a = posix_memalign((void **)&syn_hidden_word, 128, (long long)vocab_size * window_hidden_size * sizeof(real));
+ if (syn_hidden_word == NULL) {printf("Memory allocation failed\n"); exit(1);}
+
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < layer1_size; b++)
+ syn1[a * layer1_size + b] = 0;
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < window_layer_size; b++)
+ syn1_window[a * window_layer_size + b] = 0;
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < window_hidden_size; b++)
+ syn_hidden_word[a * window_hidden_size + b] = 0;
+ }
+ if (negative>0) {
+ a = posix_memalign((void **)&syn1neg, 128, (long long)vocab_size * layer1_size * sizeof(real));
+ if (syn1neg == NULL) {printf("Memory allocation failed\n"); exit(1);}
+ a = posix_memalign((void **)&syn1neg_window, 128, (long long)vocab_size * window_layer_size * sizeof(real));
+ if (syn1neg_window == NULL) {printf("Memory allocation failed\n"); exit(1);}
+ a = posix_memalign((void **)&syn_hidden_word_neg, 128, (long long)vocab_size * window_hidden_size * sizeof(real));
+ if (syn_hidden_word_neg == NULL) {printf("Memory allocation failed\n"); exit(1);}
+
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < layer1_size; b++)
+ syn1neg[a * layer1_size + b] = 0;
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < window_layer_size; b++)
+ syn1neg_window[a * window_layer_size + b] = 0;
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < window_hidden_size; b++)
+ syn_hidden_word_neg[a * window_hidden_size + b] = 0;
+ }
+ if (nce>0) {
+ a = posix_memalign((void **)&syn1nce, 128, (long long)vocab_size * layer1_size * sizeof(real));
+ if (syn1nce == NULL) {printf("Memory allocation failed\n"); exit(1);}
+ a = posix_memalign((void **)&syn1nce_window, 128, (long long)vocab_size * window_layer_size * sizeof(real));
+ if (syn1nce_window == NULL) {printf("Memory allocation failed\n"); exit(1);}
+ a = posix_memalign((void **)&syn_hidden_word_nce, 128, (long long)vocab_size * window_hidden_size * sizeof(real));
+ if (syn_hidden_word_nce == NULL) {printf("Memory allocation failed\n"); exit(1);}
+
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < layer1_size; b++)
+ syn1nce[a * layer1_size + b] = 0;
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < window_layer_size; b++)
+ syn1nce_window[a * window_layer_size + b] = 0;
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < window_hidden_size; b++)
+ syn_hidden_word_nce[a * window_hidden_size + b] = 0;
+ }
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < layer1_size; b++) {
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ syn0[a * layer1_size + b] = (((next_random & 0xFFFF) / (real)65536) - 0.5) / layer1_size;
+ }
+
+ a = posix_memalign((void **)&syn_window_hidden, 128, window_hidden_size * window_layer_size * sizeof(real));
+ if (syn_window_hidden == NULL) {printf("Memory allocation failed\n"); exit(1);}
+ for (a = 0; a < window_hidden_size * window_layer_size; a++){
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ syn_window_hidden[a] = (((next_random & 0xFFFF) / (real)65536) - 0.5) / (window_hidden_size*window_layer_size);
+ }
+
+ CreateBinaryTree();
+}
+
+void *TrainModelThread(void *id) {
+ long long a, b, d, cw, word, last_word, sentence_length = 0, sentence_position = 0;
+ long long word_count = 0, last_word_count = 0, sen[MAX_SENTENCE_LENGTH + 1];
+ long long l1, l2, c, target, label, local_iter = iter;
+ unsigned long long next_random = (long long)id;
+ real f, g;
+ clock_t now;
+ int input_len_1 = layer1_size;
+ int window_offset = -1;
+ if(type == 2 || type == 4){
+ input_len_1=window_layer_size;
+ }
+ real *neu1 = (real *)calloc(input_len_1, sizeof(real));
+ real *neu1e = (real *)calloc(input_len_1, sizeof(real));
+
+ int input_len_2 = 0;
+ if(type == 4){
+ input_len_2 = window_hidden_size;
+ }
+ real *neu2 = (real *)calloc(input_len_2, sizeof(real));
+ real *neu2e = (real *)calloc(input_len_2, sizeof(real));
+
+ FILE *fi = fopen(train_file, "rb");
+ fseek(fi, file_size / (long long)num_threads * (long long)id, SEEK_SET);
+ while (1) {
+ if (word_count - last_word_count > 10000) {
+ word_count_actual += word_count - last_word_count;
+ last_word_count = word_count;
+ if ((debug_mode > 1)) {
+ now=clock();
+ printf("%cAlpha: %f Progress: %.2f%% Words/thread/sec: %.2fk ", 13, alpha,
+ word_count_actual / (real)(iter * train_words + 1) * 100,
+ word_count_actual / ((real)(now - start + 1) / (real)CLOCKS_PER_SEC * 1000));
+ fflush(stdout);
+ }
+ alpha = starting_alpha * (1 - word_count_actual / (real)(iter * train_words + 1));
+ if (alpha < starting_alpha * 0.0001) alpha = starting_alpha * 0.0001;
+ }
+ if (sentence_length == 0) {
+ while (1) {
+ word = ReadWordIndex(fi);
+ if (feof(fi)) break;
+ if (word == -1) continue;
+ word_count++;
+ if (word == 0) break;
+ // The subsampling randomly discards frequent words while keeping the ranking same
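+ // Keep probability is sqrt(t/f) + t/f, where f is the word's relative corpus
+ // frequency and t is the -sample threshold.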
+ if (sample > 0) {
+ real ran = (sqrt(vocab[word].cn / (sample * train_words)) + 1) * (sample * train_words) / vocab[word].cn;
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ if (ran < (next_random & 0xFFFF) / (real)65536) continue;
+ }
+ sen[sentence_length] = word;
+ sentence_length++;
+ if (sentence_length >= MAX_SENTENCE_LENGTH) break;
+ }
+ sentence_position = 0;
+ }
+ if (feof(fi) || (word_count > train_words / num_threads)) {
+ word_count_actual += word_count - last_word_count;
+ local_iter--;
+ if (local_iter == 0) break;
+ word_count = 0;
+ last_word_count = 0;
+ sentence_length = 0;
+ fseek(fi, file_size / (long long)num_threads * (long long)id, SEEK_SET);
+ continue;
+ }
+ word = sen[sentence_position];
+ if (word == -1) continue;
+ for (c = 0; c < input_len_1; c++) neu1[c] = 0;
+ for (c = 0; c < input_len_1; c++) neu1e[c] = 0;
+ for (c = 0; c < input_len_2; c++) neu2[c] = 0;
+ for (c = 0; c < input_len_2; c++) neu2e[c] = 0;
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ b = next_random % window;
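+ // b uniformly shrinks the effective window for this token, so nearby context
+ // words are used more often than distant ones.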
+ if (type == 0) { //train the cbow architecture
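+ // CBOW: average the surviving context embeddings into neu1 and predict the
+ // center word from that single vector.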
+ // in -> hidden
+ cw = 0;
+ for (a = b; a < window * 2 + 1 - b; a++) if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0) continue;
+ if (c >= sentence_length) continue;
+ last_word = sen[c];
+ if (last_word == -1) continue;
+ for (c = 0; c < layer1_size; c++) neu1[c] += syn0[c + last_word * layer1_size];
+ cw++;
+ }
+ if (cw) {
+ for (c = 0; c < layer1_size; c++) neu1[c] /= cw;
+ if (hs) for (d = 0; d < vocab[word].codelen; d++) {
+ f = 0;
+ l2 = vocab[word].point[d] * layer1_size;
+ // Propagate hidden -> output
+ for (c = 0; c < layer1_size; c++) f += neu1[c] * syn1[c + l2];
+ if (f <= -MAX_EXP) continue;
+ else if (f >= MAX_EXP) continue;
+ else f = expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
+ // 'g' is the gradient multiplied by the learning rate
+ g = (1 - vocab[word].code[d] - f) * alpha;
+ // Propagate errors output -> hidden
+ for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1[c + l2];
+ // Learn weights hidden -> output
+ for (c = 0; c < layer1_size; c++) syn1[c + l2] += g * neu1[c];
+ if(cap == 1) for (c = 0; c < layer1_size; c++) capParam(syn1, c + l2);
+ }
+ // NEGATIVE SAMPLING
+ if (negative > 0) for (d = 0; d < negative + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ if(word_to_group != NULL && word_to_group[word] != -1){
+ target = word;
+ while(target == word) {
+ target = group_to_table[word_to_group[word]*table_size + (next_random >> 16) % table_size];
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ }
+ else{
+ target = table[(next_random >> 16) % table_size];
+ }
+ if (target == 0) target = next_random % (vocab_size - 1) + 1;
+ if (target == word) continue;
+ label = 0;
+ }
+ l2 = target * layer1_size;
+ f = 0;
+ for (c = 0; c < layer1_size; c++) f += neu1[c] * syn1neg[c + l2];
+ if (f > MAX_EXP) g = (label - 1) * alpha;
+ else if (f < -MAX_EXP) g = (label - 0) * alpha;
+ else g = (label - expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]) * alpha;
+ for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1neg[c + l2];
+ for (c = 0; c < layer1_size; c++) syn1neg[c + l2] += g * neu1[c];
+ if (cap == 1) for (c = 0; c < layer1_size; c++) capParam(syn1neg, c + l2);
+ }
+ // Noise Contrastive Estimation
+ if (nce > 0) for (d = 0; d < nce + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ if(word_to_group != NULL && word_to_group[word] != -1){
+ target = word;
+ while(target == word) {
+ target = group_to_table[word_to_group[word]*table_size + (next_random >> 16) % table_size];
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ }
+ }
+ else{
+ target = table[(next_random >> 16) % table_size];
+ }
+ if (target == 0) target = next_random % (vocab_size - 1) + 1;
+ if (target == word) continue;
+ label = 0;
+ }
+ l2 = target * layer1_size;
+ f = 0;
+
+ for (c = 0; c < layer1_size; c++) f += neu1[c] * syn1nce[c + l2];
+ if (f > MAX_EXP) g = (label - 1) * alpha;
+ else if (f < -MAX_EXP) g = (label - 0) * alpha;
+ else {
+ f = exp(f);
+ g = (label - f/(noise_distribution[target]*nce + f)) * alpha;
+ }
+ for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1nce[c + l2];
+ for (c = 0; c < layer1_size; c++) syn1nce[c + l2] += g * neu1[c];
+ if(cap == 1) for (c = 0; c < layer1_size; c++) capParam(syn1nce,c + l2);
+ }
+ // hidden -> in
+ for (a = b; a < window * 2 + 1 - b; a++) if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0) continue;
+ if (c >= sentence_length) continue;
+ last_word = sen[c];
+ if (last_word == -1) continue;
+ for (c = 0; c < layer1_size; c++) syn0[c + last_word * layer1_size] += neu1e[c];
+ }
+ }
+ } else if(type==1) { //train skip-gram
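+ // Skip-gram: each context word's embedding is trained, one pair at a time, to
+ // predict the current center word.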
+ for (a = b; a < window * 2 + 1 - b; a++) if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0) continue;
+ if (c >= sentence_length) continue;
+ last_word = sen[c];
+ if (last_word == -1) continue;
+ l1 = last_word * layer1_size;
+ for (c = 0; c < layer1_size; c++) neu1e[c] = 0;
+ // HIERARCHICAL SOFTMAX
+ if (hs) for (d = 0; d < vocab[word].codelen; d++) {
+ f = 0;
+ l2 = vocab[word].point[d] * layer1_size;
+ // Propagate hidden -> output
+ for (c = 0; c < layer1_size; c++) f += syn0[c + l1] * syn1[c + l2];
+ if (f <= -MAX_EXP) continue;
+ else if (f >= MAX_EXP) continue;
+ else f = expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
+ // 'g' is the gradient multiplied by the learning rate
+ g = (1 - vocab[word].code[d] - f) * alpha;
+ // Propagate errors output -> hidden
+ for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1[c + l2];
+ // Learn weights hidden -> output
+ for (c = 0; c < layer1_size; c++) syn1[c + l2] += g * syn0[c + l1];
+ if (cap == 1) for (c = 0; c < layer1_size; c++) capParam(syn1, c + l2);
+ }
+ // NEGATIVE SAMPLING
+ if (negative > 0) for (d = 0; d < negative + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ if(word_to_group != NULL && word_to_group[word] != -1){
+ target = word;
+ while(target == word) {
+ target = group_to_table[word_to_group[word]*table_size + (next_random >> 16) % table_size];
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ }
+ else{
+ target = table[(next_random >> 16) % table_size];
+ }
+ if (target == 0) target = next_random % (vocab_size - 1) + 1;
+ if (target == word) continue;
+ label = 0;
+ }
+ l2 = target * layer1_size;
+ f = 0;
+ for (c = 0; c < layer1_size; c++) f += syn0[c + l1] * syn1neg[c + l2];
+ if (f > MAX_EXP) g = (label - 1) * alpha;
+ else if (f < -MAX_EXP) g = (label - 0) * alpha;
+ else g = (label - expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]) * alpha;
+ for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1neg[c + l2];
+ for (c = 0; c < layer1_size; c++) syn1neg[c + l2] += g * syn0[c + l1];
+ if (cap == 1) for (c = 0; c < layer1_size; c++) capParam(syn1neg, c + l2);
+ }
+ // Noise Contrastive Estimation
+ if (nce > 0) for (d = 0; d < nce + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ if(word_to_group != NULL && word_to_group[word] != -1){
+ target = word;
+ while(target == word) {
+ target = group_to_table[word_to_group[word]*table_size + (next_random >> 16) % table_size];
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ }
+ else{
+ target = table[(next_random >> 16) % table_size];
+ }
+ if (target == 0) target = next_random % (vocab_size - 1) + 1;
+ if (target == word) continue;
+ label = 0;
+ }
+ l2 = target * layer1_size;
+ f = 0;
+ for (c = 0; c < layer1_size; c++) f += syn0[c + l1] * syn1nce[c + l2];
+ if (f > MAX_EXP) g = (label - 1) * alpha;
+ else if (f < -MAX_EXP) g = (label - 0) * alpha;
+ else {
+ f = exp(f);
+ g = (label - f/(noise_distribution[target]*nce + f)) * alpha;
+ }
+ for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1nce[c + l2];
+ for (c = 0; c < layer1_size; c++) syn1nce[c + l2] += g * syn0[c + l1];
+ if (cap == 1) for (c = 0; c < layer1_size; c++) capParam(syn1nce, c + l2);
+ }
+ // Learn weights input -> hidden
+ for (c = 0; c < layer1_size; c++) syn0[c + l1] += neu1e[c];
+ }
+ }
+ else if(type == 2){ //train the cwindow architecture
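+ // CWindow: like CBOW, but the context embeddings are concatenated in order
+ // (via window_offset) rather than averaged, so word order is preserved.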
+ // in -> hidden
+ cw = 0;
+ for (a = 0; a < window * 2 + 1; a++) if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0) continue;
+ if (c >= sentence_length) continue;
+ last_word = sen[c];
+ if (last_word == -1) continue;
+ window_offset = a*layer1_size;
+ if (a > window) window_offset-=layer1_size;
+ for (c = 0; c < layer1_size; c++) neu1[c+window_offset] += syn0[c + last_word * layer1_size];
+ cw++;
+ }
+ if (cw) {
+ if (hs) for (d = 0; d < vocab[word].codelen; d++) {
+ f = 0;
+ l2 = vocab[word].point[d] * window_layer_size;
+ // Propagate hidden -> output
+ for (c = 0; c < window_layer_size; c++) f += neu1[c] * syn1_window[c + l2];
+ if (f <= -MAX_EXP) continue;
+ else if (f >= MAX_EXP) continue;
+ else f = expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
+ // 'g' is the gradient multiplied by the learning rate
+ g = (1 - vocab[word].code[d] - f) * alpha;
+ // Propagate errors output -> hidden
+ for (c = 0; c < window_layer_size; c++) neu1e[c] += g * syn1_window[c + l2];
+ // Learn weights hidden -> output
+ for (c = 0; c < window_layer_size; c++) syn1_window[c + l2] += g * neu1[c];
+ if (cap == 1) for (c = 0; c < window_layer_size; c++) capParam(syn1_window, c + l2);
+ }
+ // NEGATIVE SAMPLING
+ if (negative > 0) for (d = 0; d < negative + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ if(word_to_group != NULL && word_to_group[word] != -1){
+ target = word;
+ while(target == word) {
+ target = group_to_table[word_to_group[word]*table_size + (next_random >> 16) % table_size];
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ }
+ else{
+ target = table[(next_random >> 16) % table_size];
+ }
+ if (target == 0) target = next_random % (vocab_size - 1) + 1;
+ if (target == word) continue;
+ label = 0;
+ }
+ l2 = target * window_layer_size;
+ f = 0;
+ for (c = 0; c < window_layer_size; c++) f += neu1[c] * syn1neg_window[c + l2];
+ if (f > MAX_EXP) g = (label - 1) * alpha;
+ else if (f < -MAX_EXP) g = (label - 0) * alpha;
+ else g = (label - expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]) * alpha;
+ for (c = 0; c < window_layer_size; c++) neu1e[c] += g * syn1neg_window[c + l2];
+ for (c = 0; c < window_layer_size; c++) syn1neg_window[c + l2] += g * neu1[c];
+ if(cap == 1) for (c = 0; c < window_layer_size; c++) capParam(syn1neg_window, c + l2);
+ }
+ // Noise Contrastive Estimation
+ if (nce > 0) for (d = 0; d < nce + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ if(word_to_group != NULL && word_to_group[word] != -1){
+ target = word;
+ while(target == word) {
+ target = group_to_table[word_to_group[word]*table_size + (next_random >> 16) % table_size];
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ }
+ else{
+ target = table[(next_random >> 16) % table_size];
+ }
+ if (target == 0) target = next_random % (vocab_size - 1) + 1;
+ if (target == word) continue;
+ label = 0;
+ }
+ l2 = target * window_layer_size;
+ f = 0;
+ for (c = 0; c < window_layer_size; c++) f += neu1[c] * syn1nce_window[c + l2];
+ if (f > MAX_EXP) g = (label - 1) * alpha;
+ else if (f < -MAX_EXP) g = (label - 0) * alpha;
+ else {
+ f = exp(f);
+ g = (label - f/(noise_distribution[target]*nce + f)) * alpha;
+ }
+ for (c = 0; c < window_layer_size; c++) neu1e[c] += g * syn1nce_window[c + l2];
+ for (c = 0; c < window_layer_size; c++) syn1nce_window[c + l2] += g * neu1[c];
+ if(cap == 1) for (c = 0; c < window_layer_size; c++) capParam(syn1nce_window, c + l2);
+ }
+ // hidden -> in
+ for (a = 0; a < window * 2 + 1; a++) if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0) continue;
+ if (c >= sentence_length) continue;
+ last_word = sen[c];
+ if (last_word == -1) continue;
+ window_offset = a * layer1_size;
+ if(a > window) window_offset -= layer1_size;
+ for (c = 0; c < layer1_size; c++) syn0[c + last_word * layer1_size] += neu1e[c + window_offset];
+ }
+ }
+ }
+ else if (type == 3){ //train structured skip-gram
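+ // Structured skip-gram: each relative context position keeps its own
+ // layer1_size-wide slice of the output matrices, selected by window_offset.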
+ for (a = 0; a < window * 2 + 1; a++) if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0) continue;
+ if (c >= sentence_length) continue;
+ last_word = sen[c];
+ if (last_word == -1) continue;
+ l1 = last_word * layer1_size;
+ window_offset = a * layer1_size;
+ if(a > window) window_offset -= layer1_size;
+ for (c = 0; c < layer1_size; c++) neu1e[c] = 0;
+ // HIERARCHICAL SOFTMAX
+ if (hs) for (d = 0; d < vocab[word].codelen; d++) {
+ f = 0;
+ l2 = vocab[word].point[d] * window_layer_size;
+ // Propagate hidden -> output
+ for (c = 0; c < layer1_size; c++) f += syn0[c + l1] * syn1_window[c + l2 + window_offset];
+ if (f <= -MAX_EXP) continue;
+ else if (f >= MAX_EXP) continue;
+ else f = expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
+ // 'g' is the gradient multiplied by the learning rate
+ g = (1 - vocab[word].code[d] - f) * alpha;
+ // Propagate errors output -> hidden
+ for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1_window[c + l2 + window_offset];
+ // Learn weights hidden -> output
+ for (c = 0; c < layer1_size; c++) syn1_window[c + l2 + window_offset] += g * syn0[c + l1];
+ if(cap == 1) for (c = 0; c < layer1_size; c++) capParam(syn1_window, c + l2 + window_offset);
+ }
+ // NEGATIVE SAMPLING
+ if (negative > 0) for (d = 0; d < negative + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ if(word_to_group != NULL && word_to_group[word] != -1){
+ target = word;
+ while(target == word) {
+ target = group_to_table[word_to_group[word]*table_size + (next_random >> 16) % table_size];
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ }
+ else{
+ target = table[(next_random >> 16) % table_size];
+ }
+ if (target == 0) target = next_random % (vocab_size - 1) + 1;
+ if (target == word) continue;
+ label = 0;
+ }
+ l2 = target * window_layer_size;
+ f = 0;
+ for (c = 0; c < layer1_size; c++) f += syn0[c + l1] * syn1neg_window[c + l2 + window_offset];
+ if (f > MAX_EXP) g = (label - 1) * alpha;
+ else if (f < -MAX_EXP) g = (label - 0) * alpha;
+ else g = (label - expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]) * alpha;
+ for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1neg_window[c + l2 + window_offset];
+ for (c = 0; c < layer1_size; c++) syn1neg_window[c + l2 + window_offset] += g * syn0[c + l1];
+ if(cap == 1) for (c = 0; c < layer1_size; c++) capParam(syn1neg_window, c + l2 + window_offset);
+ }
+ // Noise Contrastive Estimation
+ if (nce > 0) for (d = 0; d < nce + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ if(word_to_group != NULL && word_to_group[word] != -1){
+ target = word;
+ while(target == word) {
+ target = group_to_table[word_to_group[word]*table_size + (next_random >> 16) % table_size];
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ }
+ else{
+ target = table[(next_random >> 16) % table_size];
+ }
+ if (target == 0) target = next_random % (vocab_size - 1) + 1;
+ if (target == word) continue;
+ label = 0;
+ }
+ l2 = target * window_layer_size;
+ f = 0;
+ for (c = 0; c < layer1_size; c++) f += syn0[c + l1] * syn1nce_window[c + l2 + window_offset];
+ if (f > MAX_EXP) g = (label - 1) * alpha;
+ else if (f < -MAX_EXP) g = (label - 0) * alpha;
+ else {
+ f = exp(f);
+ g = (label - f/(noise_distribution[target]*nce + f)) * alpha;
+ }
+ for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1nce_window[c + l2 + window_offset];
+ for (c = 0; c < layer1_size; c++) syn1nce_window[c + l2 + window_offset] += g * syn0[c + l1];
+ if (cap == 1) for (c = 0; c < layer1_size; c++) capParam(syn1nce_window, c + l2 + window_offset);
+ }
+ // Learn weights input -> hidden
+ for (c = 0; c < layer1_size; c++) {
+ syn0[c + l1] += neu1e[c];
+ if(syn0[c + l1] > 50) syn0[c + l1] = 50;
+ if(syn0[c + l1] < -50) syn0[c + l1] = -50;
+ }
+ }
+ }
+ else if(type == 4){ //training senna
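+ // SENNA-style model: concatenated context window -> hardTanh hidden
+ // layer of window_hidden_size units -> output layer.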
+ // in -> hidden
+ cw = 0;
+ for (a = 0; a < window * 2 + 1; a++) if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0) continue;
+ if (c >= sentence_length) continue;
+ last_word = sen[c];
+ if (last_word == -1) continue;
+ window_offset = a*layer1_size;
+ if (a > window) window_offset-=layer1_size;
+ for (c = 0; c < layer1_size; c++) neu1[c+window_offset] += syn0[c + last_word * layer1_size];
+ cw++;
+ }
+ if (cw) {
+ for (a = 0; a < window_hidden_size; a++){
+ c = a*window_layer_size;
+ for(b = 0; b < window_layer_size; b++){
+ neu2[a] += syn_window_hidden[c + b] * neu1[b];
+ }
+ }
+ if (hs) for (d = 0; d < vocab[word].codelen; d++) {
+ f = 0;
+ l2 = vocab[word].point[d] * window_hidden_size;
+ // Propagate hidden -> output
+ for (c = 0; c < window_hidden_size; c++) f += hardTanh(neu2[c]) * syn_hidden_word[c + l2];
+ if (f <= -MAX_EXP) continue;
+ else if (f >= MAX_EXP) continue;
+ else f = expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
+ // 'g' is the gradient multiplied by the learning rate
+ g = (1 - vocab[word].code[d] - f) * alpha;
+ // Propagate errors output -> hidden
+ for (c = 0; c < window_hidden_size; c++) neu2e[c] += dHardTanh(neu2[c],g) * g * syn_hidden_word[c + l2];
+ // Learn weights hidden -> output
+ for (c = 0; c < window_hidden_size; c++) syn_hidden_word[c + l2] += dHardTanh(neu2[c],g) * g * neu2[c];
+ }
+ // NEGATIVE SAMPLING
+ if (negative > 0) for (d = 0; d < negative + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ if(word_to_group != NULL && word_to_group[word] != -1){
+ target = word;
+ while(target == word) {
+ target = group_to_table[word_to_group[word]*table_size + (next_random >> 16) % table_size];
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ }
+ else{
+ target = table[(next_random >> 16) % table_size];
+ }
+ if (target == 0) target = next_random % (vocab_size - 1) + 1;
+ if (target == word) continue;
+ label = 0;
+ }
+ l2 = target * window_hidden_size;
+ f = 0;
+ for (c = 0; c < window_hidden_size; c++) f += hardTanh(neu2[c]) * syn_hidden_word_neg[c + l2];
+ if (f > MAX_EXP) g = (label - 1) * alpha / negative;
+ else if (f < -MAX_EXP) g = (label - 0) * alpha / negative;
+ else g = (label - expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]) * alpha / negative;
+ for (c = 0; c < window_hidden_size; c++) neu2e[c] += dHardTanh(neu2[c],g) * g * syn_hidden_word_neg[c + l2];
+ for (c = 0; c < window_hidden_size; c++) syn_hidden_word_neg[c + l2] += dHardTanh(neu2[c],g) * g * neu2[c];
+ }
+ for (a = 0; a < window_hidden_size; a++)
+ for(b = 0; b < window_layer_size; b++)
+ neu1e[b] += neu2e[a] * syn_window_hidden[a*window_layer_size + b];
+ for (a = 0; a < window_hidden_size; a++)
+ for(b = 0; b < window_layer_size; b++)
+ syn_window_hidden[a*window_layer_size + b] += neu2e[a] * neu1[b];
+ // hidden -> in
+ for (a = 0; a < window * 2 + 1; a++) if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0) continue;
+ if (c >= sentence_length) continue;
+ last_word = sen[c];
+ if (last_word == -1) continue;
+ window_offset = a * layer1_size;
+ if(a > window) window_offset -= layer1_size;
+ for (c = 0; c < layer1_size; c++) syn0[c + last_word * layer1_size] += neu1e[c + window_offset];
+ }
+ }
+ }
+ else{
+ printf("unknown type %i", type);
+ exit(0);
+ }
+ sentence_position++;
+ if (sentence_position >= sentence_length) {
+ sentence_length = 0;
+ continue;
+ }
+ }
+ fclose(fi);
+ free(neu1);
+ free(neu1e);
+ free(neu2);
+ free(neu2e);
+ pthread_exit(NULL);
+}
+
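+// Driver: build (or load) the vocabulary, initialize the network, run
+// num_threads training threads, then write word vectors or k-means
+// classes to output_file.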
+void TrainModel() {
+ long a, b, c, d;
+ FILE *fo;
+ pthread_t *pt = (pthread_t *)malloc(num_threads * sizeof(pthread_t));
+ printf("Starting training using file %s\n", train_file);
+ starting_alpha = alpha;
+ if (read_vocab_file[0] != 0) ReadVocab(); else LearnVocabFromTrainFile();
+ if (save_vocab_file[0] != 0) SaveVocab();
+ if (output_file[0] == 0) return;
+ InitNet();
+ if (negative > 0 || nce > 0) InitUnigramTable();
+ if (negative_classes_file[0] != 0) InitClassUnigramTable();
+ start = clock();
+ for (a = 0; a < num_threads; a++) pthread_create(&pt[a], NULL, TrainModelThread, (void *)a);
+ for (a = 0; a < num_threads; a++) pthread_join(pt[a], NULL);
+ fo = fopen(output_file, "wb");
+ if (classes == 0) {
+ // Save the word vectors
+ fprintf(fo, "%lld %lld\n", vocab_size, layer1_size);
+ for (a = 0; a < vocab_size; a++) {
+ fprintf(fo, "%s ", vocab[a].word);
+ if (binary) for (b = 0; b < layer1_size; b++) fwrite(&syn0[a * layer1_size + b], sizeof(real), 1, fo);
+ else for (b = 0; b < layer1_size; b++) fprintf(fo, "%lf ", syn0[a * layer1_size + b]);
+ fprintf(fo, "\n");
+ }
+ } else {
+ // Run K-means on the word vectors
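+ // 10 iterations of spherical k-means: centroids are length-normalized,
+ // and words are reassigned to the centroid with the largest dot product.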
+ int clcn = classes, iter = 10, closeid;
+ int *centcn = (int *)malloc(classes * sizeof(int));
+ int *cl = (int *)calloc(vocab_size, sizeof(int));
+ real closev, x;
+ real *cent = (real *)calloc(classes * layer1_size, sizeof(real));
+ for (a = 0; a < vocab_size; a++) cl[a] = a % clcn;
+ for (a = 0; a < iter; a++) {
+ for (b = 0; b < clcn * layer1_size; b++) cent[b] = 0;
+ for (b = 0; b < clcn; b++) centcn[b] = 1;
+ for (c = 0; c < vocab_size; c++) {
+ for (d = 0; d < layer1_size; d++) cent[layer1_size * cl[c] + d] += syn0[c * layer1_size + d];
+ centcn[cl[c]]++;
+ }
+ for (b = 0; b < clcn; b++) {
+ closev = 0;
+ for (c = 0; c < layer1_size; c++) {
+ cent[layer1_size * b + c] /= centcn[b];
+ closev += cent[layer1_size * b + c] * cent[layer1_size * b + c];
+ }
+ closev = sqrt(closev);
+ for (c = 0; c < layer1_size; c++) cent[layer1_size * b + c] /= closev;
+ }
+ for (c = 0; c < vocab_size; c++) {
+ closev = -10;
+ closeid = 0;
+ for (d = 0; d < clcn; d++) {
+ x = 0;
+ for (b = 0; b < layer1_size; b++) x += cent[layer1_size * d + b] * syn0[c * layer1_size + b];
+ if (x > closev) {
+ closev = x;
+ closeid = d;
+ }
+ }
+ cl[c] = closeid;
+ }
+ }
+ // Save the K-means classes
+ for (a = 0; a < vocab_size; a++) fprintf(fo, "%s %d\n", vocab[a].word, cl[a]);
+ free(centcn);
+ free(cent);
+ free(cl);
+ }
+ fclose(fo);
+}
+
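+// Returns the position of flag str in argv, or -1 if absent; exits if the
+// flag is the last argument and thus has no value.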
+int ArgPos(char *str, int argc, char **argv) {
+ int a;
+ for (a = 1; a < argc; a++) if (!strcmp(str, argv[a])) {
+ if (a == argc - 1) {
+ printf("Argument missing for %s\n", str);
+ exit(1);
+ }
+ return a;
+ }
+ return -1;
+}
+
+int main(int argc, char **argv) {
+ int i;
+ if (argc == 1) {
+ printf("WORD VECTOR estimation toolkit v 0.1c\n\n");
+ printf("Options:\n");
+ printf("Parameters for training:\n");
+ printf("\t-train <file>\n");
+ printf("\t\tUse text data from <file> to train the model\n");
+ printf("\t-output <file>\n");
+ printf("\t\tUse <file> to save the resulting word vectors / word clusters\n");
+ printf("\t-size <int>\n");
+ printf("\t\tSet size of word vectors; default is 100\n");
+ printf("\t-window <int>\n");
+ printf("\t\tSet max skip length between words; default is 5\n");
+ printf("\t-sample <float>\n");
+ printf("\t\tSet threshold for occurrence of words. Those that appear with higher frequency in the training data\n");
+ printf("\t\twill be randomly down-sampled; default is 1e-3, useful range is (0, 1e-5)\n");
+ printf("\t-hs <int>\n");
+ printf("\t\tUse Hierarchical Softmax; default is 0 (not used)\n");
+ printf("\t-negative <int>\n");
+ printf("\t\tNumber of negative examples; default is 5, common values are 3 - 10 (0 = not used)\n");
+ printf("\t-negative-classes <file>\n");
+ printf("\t\tNegative classes to sample from\n");
+ printf("\t-nce <int>\n");
+ printf("\t\tNumber of negative examples for nce; default is 0, common values are 3 - 10 (0 = not used)\n");
+ printf("\t-threads <int>\n");
+ printf("\t\tUse <int> threads (default 12)\n");
+ printf("\t-iter <int>\n");
+ printf("\t\tRun more training iterations (default 5)\n");
+ printf("\t-min-count <int>\n");
+ printf("\t\tThis will discard words that appear less than <int> times; default is 5\n");
+ printf("\t-alpha <float>\n");
+ printf("\t\tSet the starting learning rate; default is 0.025 for skip-gram and 0.05 for CBOW\n");
+ printf("\t-classes <int>\n");
+ printf("\t\tOutput word classes rather than word vectors; default number of classes is 0 (vectors are written)\n");
+ printf("\t-debug <int>\n");
+ printf("\t\tSet the debug mode (default = 2 = more info during training)\n");
+ printf("\t-binary <int>\n");
+ printf("\t\tSave the resulting vectors in binary moded; default is 0 (off)\n");
+ printf("\t-save-vocab <file>\n");
+ printf("\t\tThe vocabulary will be saved to <file>\n");
+ printf("\t-read-vocab <file>\n");
+ printf("\t\tThe vocabulary will be read from <file>, not constructed from the training data\n");
+ printf("\t-type <int>\n");
+ printf("\t\tType of embeddings (0 for cbow, 1 for skipngram, 2 for cwindow, 3 for structured skipngram, 4 for senna type)\n");
+ printf("\t-cap <int>\n");
+ printf("\t\tlimit the parameter values to the range [-50, 50]; default is 0 (off)\n");
+ printf("\nExamples:\n");
+ printf("./word2vec -train data.txt -output vec.txt -size 200 -window 5 -sample 1e-4 -negative 5 -hs 0 -binary 0 -type 1 -iter 3\n\n");
+ return 0;
+ }
+ output_file[0] = 0;
+ save_vocab_file[0] = 0;
+ read_vocab_file[0] = 0;
+ negative_classes_file[0] = 0;
+ if ((i = ArgPos((char *)"-size", argc, argv)) > 0) layer1_size = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-train", argc, argv)) > 0) strcpy(train_file, argv[i + 1]);
+ if ((i = ArgPos((char *)"-save-vocab", argc, argv)) > 0) strcpy(save_vocab_file, argv[i + 1]);
+ if ((i = ArgPos((char *)"-read-vocab", argc, argv)) > 0) strcpy(read_vocab_file, argv[i + 1]);
+ if ((i = ArgPos((char *)"-debug", argc, argv)) > 0) debug_mode = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-binary", argc, argv)) > 0) binary = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-type", argc, argv)) > 0) type = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-output", argc, argv)) > 0) strcpy(output_file, argv[i + 1]);
+ if ((i = ArgPos((char *)"-window", argc, argv)) > 0) window = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-sample", argc, argv)) > 0) sample = atof(argv[i + 1]);
+ if ((i = ArgPos((char *)"-hs", argc, argv)) > 0) hs = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-negative", argc, argv)) > 0) negative = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-negative-classes", argc, argv)) > 0) strcpy(negative_classes_file, argv[i + 1]);
+ if ((i = ArgPos((char *)"-nce", argc, argv)) > 0) nce = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-threads", argc, argv)) > 0) num_threads = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-iter", argc, argv)) > 0) iter = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-min-count", argc, argv)) > 0) min_count = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-classes", argc, argv)) > 0) classes = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-cap", argc, argv)) > 0) cap = atoi(argv[i + 1]);
+ if (type==0 || type==2 || type==4) alpha = 0.05;
+ if ((i = ArgPos((char *)"-alpha", argc, argv)) > 0) alpha = atof(argv[i + 1]);
+ vocab = (struct vocab_word *)calloc(vocab_max_size, sizeof(struct vocab_word));
+ vocab_hash = (int *)calloc(vocab_hash_size, sizeof(int));
+ expTable = (real *)malloc((EXP_TABLE_SIZE + 1) * sizeof(real));
+ for (i = 0; i < EXP_TABLE_SIZE; i++) {
+ expTable[i] = exp((i / (real)EXP_TABLE_SIZE * 2 - 1) * MAX_EXP); // Precompute the exp() table
+ expTable[i] = expTable[i] / (expTable[i] + 1); // Precompute f(x) = x / (x + 1)
+ }
+ TrainModel();
+ return 0;
+}
+
diff --git a/word2vecExt.c b/word2vecExt.c
new file mode 100644
index 0000000..88d7ef0
--- /dev/null
+++ b/word2vecExt.c
@@ -0,0 +1,1830 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+#include <pthread.h>
+
+#define MAX_STRING 100
+#define EXP_TABLE_SIZE 1000
+#define MAX_EXP 6
+#define MAX_SENTENCE_LENGTH 1000
+#define MAX_CODE_LENGTH 40
+
+const int vocab_hash_size = 30000000; // Maximum 30 * 0.7 = 21M words in the vocabulary
+
+typedef float real; // Precision of float numbers
+
+struct vocab_word {
+ long long cn;
+ int *point;
+ char *word, *code, codelen;
+};
+
+char train_file[MAX_STRING], output_file[MAX_STRING];
+char save_vocab_file[MAX_STRING], read_vocab_file[MAX_STRING];
+char save_net_file[MAX_STRING], read_net_file[MAX_STRING];
+struct vocab_word *vocab;
+int binary = 0, type = 1, debug_mode = 2, window = 5, min_count = 5,
+ num_threads = 12, min_reduce = 1;
+int *vocab_hash;
+long long vocab_max_size = 1000, vocab_size = 0, layer1_size = 100;
+long long train_words = 0, word_count_actual = 0, iter = 5, file_size = 0,
+ classes = 0;
+real alpha = 0.025, starting_alpha, sample = 1e-3;
+real *syn0, *syn1, *syn1neg, *syn1nce, *expTable;
+clock_t start;
+
+real *syn1_window, *syn1neg_window, *syn1nce_window;
+int w_offset, window_layer_size;
+
+int window_hidden_size = 500;
+real *syn_window_hidden, *syn_hidden_word, *syn_hidden_word_neg,
+ *syn_hidden_word_nce;
+
+int hs = 0, negative = 5;
+const int table_size = 1e8;
+int *table;
+
+// contrastive negative sampling
+char negative_classes_file[MAX_STRING];
+int *word_to_group;
+int *group_to_table; //group_size*table_size
+int class_number;
+
+//nce
+real* noise_distribution;
+int nce = 0;
+
+//param caps
+real CAP_VALUE = 50;
+int cap = 0;
+
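+// Clamp a single parameter to [-CAP_VALUE, CAP_VALUE] (enabled with -cap 1).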
+void capParam(real* array, int index) {
+ if (array[index] > CAP_VALUE)
+ array[index] = CAP_VALUE;
+ else if (array[index] < -CAP_VALUE)
+ array[index] = -CAP_VALUE;
+}
+
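+// Hard tanh activation used by the SENNA-style (type 4) hidden layer.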
+real hardTanh(real x) {
+ if (x >= 1) {
+ return 1;
+ } else if (x <= -1) {
+ return -1;
+ } else {
+ return x;
+ }
+}
+
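+// Gradient gate for hardTanh: 0 when the unit is saturated and the
+// gradient would push it further out of [-1, 1], else 1.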
+real dHardTanh(real x, real g) {
+ if (x > 1 && g > 0) {
+ return 0;
+ }
+ if (x < -1 && g < 0) {
+ return 0;
+ }
+ return 1;
+}
+
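+// Fill the negative-sampling table: each word takes a share of the
+// table_size slots proportional to count^0.75; the same distribution
+// is stored in noise_distribution for NCE.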
+void InitUnigramTable() {
+ int a, i;
+ long long train_words_pow = 0;
+ real d1, power = 0.75;
+ table = (int *) malloc(table_size * sizeof(int));
+ for (a = 0; a < vocab_size; a++)
+ train_words_pow += pow(vocab[a].cn, power);
+ i = 0;
+ d1 = pow(vocab[i].cn, power) / (real) train_words_pow;
+ for (a = 0; a < table_size; a++) {
+ table[a] = i;
+ if (a / (real) table_size > d1) {
+ i++;
+ d1 += pow(vocab[i].cn, power) / (real) train_words_pow;
+ }
+ if (i >= vocab_size)
+ i = vocab_size - 1;
+ }
+
+ noise_distribution = (real *) calloc(vocab_size, sizeof(real));
+ for (a = 0; a < vocab_size; a++)
+ noise_distribution[a] = pow(vocab[a].cn, power)
+ / (real) train_words_pow;
+}
+
+// Reads a single word from a file, assuming space + tab + EOL to be word boundaries
+void ReadWord(char *word, FILE *fin) {
+ int a = 0, ch;
+ while (!feof(fin)) {
+ ch = fgetc(fin);
+ if (ch == 13)
+ continue;
+ if ((ch == ' ') || (ch == '\t') || (ch == '\n')) {
+ if (a > 0) {
+ if (ch == '\n')
+ ungetc(ch, fin);
+ break;
+ }
+ if (ch == '\n') {
+ strcpy(word, (char *) "</s>");
+ return;
+ } else
+ continue;
+ }
+ word[a] = ch;
+ a++;
+ if (a >= MAX_STRING - 1)
+ a--; // Truncate too long words
+ }
+ word[a] = 0;
+}
+
+// Returns hash value of a word
+int GetWordHash(char *word) {
+ unsigned long long a, hash = 0;
+ for (a = 0; a < strlen(word); a++)
+ hash = hash * 257 + word[a];
+ hash = hash % vocab_hash_size;
+ return hash;
+}
+
+// Returns position of a word in the vocabulary; if the word is not found, returns -1
+int SearchVocab(char *word) {
+ unsigned int hash = GetWordHash(word);
+ while (1) {
+ if (vocab_hash[hash] == -1)
+ return -1;
+ if (!strcmp(word, vocab[vocab_hash[hash]].word))
+ return vocab_hash[hash];
+ hash = (hash + 1) % vocab_hash_size;
+ }
+ return -1;
+}
+
+// Reads a word and returns its index in the vocabulary
+int ReadWordIndex(FILE *fin) {
+ char word[MAX_STRING];
+ ReadWord(word, fin);
+ if (feof(fin))
+ return -1;
+ return SearchVocab(word);
+}
+
+// Adds a word to the vocabulary
+int AddWordToVocab(char *word) {
+ unsigned int hash, length = strlen(word) + 1;
+ if (length > MAX_STRING)
+ length = MAX_STRING;
+ vocab[vocab_size].word = (char *) calloc(length, sizeof(char));
+ strcpy(vocab[vocab_size].word, word);
+ vocab[vocab_size].cn = 0;
+ vocab_size++;
+ // Reallocate memory if needed
+ if (vocab_size + 2 >= vocab_max_size) {
+ vocab_max_size += 1000;
+ vocab = (struct vocab_word *) realloc(vocab,
+ vocab_max_size * sizeof(struct vocab_word));
+ }
+ hash = GetWordHash(word);
+ while (vocab_hash[hash] != -1)
+ hash = (hash + 1) % vocab_hash_size;
+ vocab_hash[hash] = vocab_size - 1;
+ return vocab_size - 1;
+}
+
+// Used later for sorting by word counts
+int VocabCompare(const void *a, const void *b) {
+ return ((struct vocab_word *) b)->cn - ((struct vocab_word *) a)->cn;
+}
+
+// Sorts the vocabulary by frequency using word counts
+void SortVocab() {
+ int a, size;
+ unsigned int hash;
+ // Sort the vocabulary and keep </s> at the first position
+ qsort(&vocab[1], vocab_size - 1, sizeof(struct vocab_word), VocabCompare);
+ for (a = 0; a < vocab_hash_size; a++)
+ vocab_hash[a] = -1;
+ size = vocab_size;
+ train_words = 0;
+ for (a = 0; a < size; a++) {
+		// Words occurring less than min_count times will be discarded from the vocab
+ if ((vocab[a].cn < min_count) && (a != 0)) {
+ vocab_size--;
+ free(vocab[a].word);
+ } else {
+			// Hash will be re-computed, as it is no longer valid after sorting
+ hash = GetWordHash(vocab[a].word);
+ while (vocab_hash[hash] != -1)
+ hash = (hash + 1) % vocab_hash_size;
+ vocab_hash[hash] = a;
+ train_words += vocab[a].cn;
+ }
+ }
+ vocab = (struct vocab_word *) realloc(vocab,
+ (vocab_size + 1) * sizeof(struct vocab_word));
+ // Allocate memory for the binary tree construction
+ for (a = 0; a < vocab_size; a++) {
+ vocab[a].code = (char *) calloc(MAX_CODE_LENGTH, sizeof(char));
+ vocab[a].point = (int *) calloc(MAX_CODE_LENGTH, sizeof(int));
+ }
+}
+
+// Reduces the vocabulary by removing infrequent tokens
+void ReduceVocab() {
+ int a, b = 0;
+ unsigned int hash;
+ for (a = 0; a < vocab_size; a++)
+ if (vocab[a].cn > min_reduce) {
+ vocab[b].cn = vocab[a].cn;
+ vocab[b].word = vocab[a].word;
+ b++;
+ } else
+ free(vocab[a].word);
+ vocab_size = b;
+ for (a = 0; a < vocab_hash_size; a++)
+ vocab_hash[a] = -1;
+ for (a = 0; a < vocab_size; a++) {
+		// Hash will be re-computed, as it is no longer valid
+ hash = GetWordHash(vocab[a].word);
+ while (vocab_hash[hash] != -1)
+ hash = (hash + 1) % vocab_hash_size;
+ vocab_hash[hash] = a;
+ }
+ fflush(stdout);
+ min_reduce++;
+}
+
+// Create binary Huffman tree using the word counts
+// Frequent words will have short unique binary codes
+void CreateBinaryTree() {
+ long long a, b, i, min1i, min2i, pos1, pos2, point[MAX_CODE_LENGTH];
+ char code[MAX_CODE_LENGTH];
+ long long *count = (long long *) calloc(vocab_size * 2 + 1,
+ sizeof(long long));
+ long long *binary = (long long *) calloc(vocab_size * 2 + 1,
+ sizeof(long long));
+ long long *parent_node = (long long *) calloc(vocab_size * 2 + 1,
+ sizeof(long long));
+ for (a = 0; a < vocab_size; a++)
+ count[a] = vocab[a].cn;
+ for (a = vocab_size; a < vocab_size * 2; a++)
+ count[a] = 1e15;
+ pos1 = vocab_size - 1;
+ pos2 = vocab_size;
+ // Following algorithm constructs the Huffman tree by adding one node at a time
+ for (a = 0; a < vocab_size - 1; a++) {
+ // First, find two smallest nodes 'min1, min2'
+ if (pos1 >= 0) {
+ if (count[pos1] < count[pos2]) {
+ min1i = pos1;
+ pos1--;
+ } else {
+ min1i = pos2;
+ pos2++;
+ }
+ } else {
+ min1i = pos2;
+ pos2++;
+ }
+ if (pos1 >= 0) {
+ if (count[pos1] < count[pos2]) {
+ min2i = pos1;
+ pos1--;
+ } else {
+ min2i = pos2;
+ pos2++;
+ }
+ } else {
+ min2i = pos2;
+ pos2++;
+ }
+ count[vocab_size + a] = count[min1i] + count[min2i];
+ parent_node[min1i] = vocab_size + a;
+ parent_node[min2i] = vocab_size + a;
+ binary[min2i] = 1;
+ }
+ // Now assign binary code to each vocabulary word
+ for (a = 0; a < vocab_size; a++) {
+ b = a;
+ i = 0;
+ while (1) {
+ code[i] = binary[b];
+ point[i] = b;
+ i++;
+ b = parent_node[b];
+ if (b == vocab_size * 2 - 2)
+ break;
+ }
+ vocab[a].codelen = i;
+ vocab[a].point[0] = vocab_size - 2;
+ for (b = 0; b < i; b++) {
+ vocab[a].code[i - b - 1] = code[b];
+ vocab[a].point[i - b] = point[b] - vocab_size;
+ }
+ }
+ free(count);
+ free(binary);
+ free(parent_node);
+}
+
+void LearnVocabFromTrainFile() {
+ char word[MAX_STRING];
+ FILE *fin;
+ long long a, i;
+ for (a = 0; a < vocab_hash_size; a++)
+ vocab_hash[a] = -1;
+ fin = fopen(train_file, "rb");
+ if (fin == NULL) {
+ printf("ERROR: training data file not found!\n");
+ exit(1);
+ }
+ vocab_size = 0;
+ AddWordToVocab((char *) "</s>");
+ while (1) {
+ ReadWord(word, fin);
+ if (feof(fin))
+ break;
+ train_words++;
+ if ((debug_mode > 1) && (train_words % 100000 == 0)) {
+ printf("%lldK%c", train_words / 1000, 13);
+ fflush(stdout);
+ }
+ i = SearchVocab(word);
+ if (i == -1) {
+ a = AddWordToVocab(word);
+ vocab[a].cn = 1;
+ } else
+ vocab[i].cn++;
+ if (vocab_size > vocab_hash_size * 0.7)
+ ReduceVocab();
+ }
+ SortVocab();
+ if (debug_mode > 0) {
+ printf("Vocab size: %lld\n", vocab_size);
+ printf("Words in train file: %lld\n", train_words);
+ }
+ file_size = ftell(fin);
+ fclose(fin);
+}
+
+void SaveVocab() {
+ long long i;
+ FILE *fo = fopen(save_vocab_file, "wb");
+ for (i = 0; i < vocab_size; i++)
+ fprintf(fo, "%s %lld\n", vocab[i].word, vocab[i].cn);
+ fclose(fo);
+}
+
+void ReadVocab() {
+ long long a, i = 0;
+ char c;
+ char word[MAX_STRING];
+ FILE *fin = fopen(read_vocab_file, "rb");
+ if (fin == NULL) {
+ printf("Vocabulary file not found\n");
+ exit(1);
+ }
+ for (a = 0; a < vocab_hash_size; a++)
+ vocab_hash[a] = -1;
+ vocab_size = 0;
+ while (1) {
+ ReadWord(word, fin);
+ if (feof(fin))
+ break;
+ a = AddWordToVocab(word);
+ fscanf(fin, "%lld%c", &vocab[a].cn, &c);
+ i++;
+ }
+ SortVocab();
+ if (debug_mode > 0) {
+ printf("Vocab size: %lld\n", vocab_size);
+ printf("Words in train file: %lld\n", train_words);
+ }
+ fin = fopen(train_file, "rb");
+ if (fin == NULL) {
+ printf("ERROR: training data file not found!\n");
+ exit(1);
+ }
+ fseek(fin, 0, SEEK_END);
+ file_size = ftell(fin);
+ fclose(fin);
+}
+
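+// Build one sampling table per class for contrastive negative sampling.
+// Each record in the class file appears to be "<class> <word> <extra>",
+// read with three ReadWord calls; the third token is discarded.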
+void InitClassUnigramTable() {
+ long long a, c;
+ printf("loading class unigrams \n");
+ FILE *fin = fopen(negative_classes_file, "rb");
+ if (fin == NULL) {
+ printf("ERROR: class file not found!\n");
+ exit(1);
+ }
+ word_to_group = (int *) malloc(vocab_size * sizeof(int));
+ for (a = 0; a < vocab_size; a++)
+ word_to_group[a] = -1;
+ char class[MAX_STRING];
+ char prev_class[MAX_STRING];
+ prev_class[0] = 0;
+ char word[MAX_STRING];
+ class_number = -1;
+ while (1) {
+ if (feof(fin))
+ break;
+ ReadWord(class, fin);
+ ReadWord(word, fin);
+ int word_index = SearchVocab(word);
+ if (word_index != -1) {
+ if (strcmp(class, prev_class) != 0) {
+ class_number++;
+ strcpy(prev_class, class);
+ }
+ word_to_group[word_index] = class_number;
+ }
+ ReadWord(word, fin);
+ }
+ class_number++;
+ fclose(fin);
+
+ group_to_table = (int *) malloc(table_size * class_number * sizeof(int));
+ long long train_words_pow = 0;
+ real d1, power = 0.75;
+
+ for (c = 0; c < class_number; c++) {
+ long long offset = c * table_size;
+ train_words_pow = 0;
+ for (a = 0; a < vocab_size; a++)
+ if (word_to_group[a] == c)
+ train_words_pow += pow(vocab[a].cn, power);
+ int i = 0;
+		while (i < vocab_size && word_to_group[i] != c)
+ i++;
+ d1 = pow(vocab[i].cn, power) / (real) train_words_pow;
+ for (a = 0; a < table_size; a++) {
+ //printf("index %lld , word %d\n", a, i);
+ group_to_table[offset + a] = i;
+ if (a / (real) table_size > d1) {
+ i++;
+				while (i < vocab_size && word_to_group[i] != c)
+ i++;
+ d1 += pow(vocab[i].cn, power) / (real) train_words_pow;
+ }
+			if (i >= vocab_size) {
+				i = vocab_size - 1;
+				while (i >= 0 && word_to_group[i] != c)
+					i--;
+			}
+ }
+ }
+}
+
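+// Serialize the input embeddings (syn0) and the window->hidden weights
+// to save_net_file as raw reals.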
+void SaveNet() {
+ long long a, b;
+ FILE *fnet = fopen(save_net_file, "wb");
+ if (fnet == NULL) {
+ printf("Net parameter file not found\n");
+ exit(1);
+ }
+ for (a = 0; a < vocab_size; a++)
+ for (b = 0; b < layer1_size; b++) {
+ fwrite(&syn0[a * layer1_size + b], sizeof(real), 1, fnet);
+ }
+ for (a = 0; a < window_hidden_size * window_layer_size; a++) {
+ fwrite(&syn_window_hidden[a],sizeof(real),1,fnet);
+ }
+ fclose(fnet);
+}
+
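+// Allocate and zero the output matrices required by the chosen objectives
+// (hs, negative, nce), then either randomly initialize syn0 and
+// syn_window_hidden or load them from read_net_file.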
+void InitNet() {
+ long long a, b;
+ unsigned long long next_random = 1;
+ window_layer_size = layer1_size * window * 2;
+ a = posix_memalign((void **) &syn0, 128,
+ (long long) vocab_size * layer1_size * sizeof(real));
+ if (syn0 == NULL) {
+ printf("Memory allocation failed\n");
+ exit(1);
+ }
+
+ if (hs) {
+ a = posix_memalign((void **) &syn1, 128,
+ (long long) vocab_size * layer1_size * sizeof(real));
+ if (syn1 == NULL) {
+ printf("Memory allocation failed\n");
+ exit(1);
+ }
+ a = posix_memalign((void **) &syn1_window, 128,
+ (long long) vocab_size * window_layer_size * sizeof(real));
+ if (syn1_window == NULL) {
+ printf("Memory allocation failed\n");
+ exit(1);
+ }
+ a = posix_memalign((void **) &syn_hidden_word, 128,
+ (long long) vocab_size * window_hidden_size * sizeof(real));
+ if (syn_hidden_word == NULL) {
+ printf("Memory allocation failed\n");
+ exit(1);
+ }
+
+ for (a = 0; a < vocab_size; a++)
+ for (b = 0; b < layer1_size; b++)
+ syn1[a * layer1_size + b] = 0;
+ for (a = 0; a < vocab_size; a++)
+ for (b = 0; b < window_layer_size; b++)
+ syn1_window[a * window_layer_size + b] = 0;
+ for (a = 0; a < vocab_size; a++)
+ for (b = 0; b < window_hidden_size; b++)
+ syn_hidden_word[a * window_hidden_size + b] = 0;
+ }
+ if (negative > 0) {
+ a = posix_memalign((void **) &syn1neg, 128,
+ (long long) vocab_size * layer1_size * sizeof(real));
+ if (syn1neg == NULL) {
+ printf("Memory allocation failed\n");
+ exit(1);
+ }
+ a = posix_memalign((void **) &syn1neg_window, 128,
+ (long long) vocab_size * window_layer_size * sizeof(real));
+ if (syn1neg_window == NULL) {
+ printf("Memory allocation failed\n");
+ exit(1);
+ }
+ a = posix_memalign((void **) &syn_hidden_word_neg, 128,
+ (long long) vocab_size * window_hidden_size * sizeof(real));
+ if (syn_hidden_word_neg == NULL) {
+ printf("Memory allocation failed\n");
+ exit(1);
+ }
+
+ for (a = 0; a < vocab_size; a++)
+ for (b = 0; b < layer1_size; b++)
+ syn1neg[a * layer1_size + b] = 0;
+ for (a = 0; a < vocab_size; a++)
+ for (b = 0; b < window_layer_size; b++)
+ syn1neg_window[a * window_layer_size + b] = 0;
+ for (a = 0; a < vocab_size; a++)
+ for (b = 0; b < window_hidden_size; b++)
+ syn_hidden_word_neg[a * window_hidden_size + b] = 0;
+ }
+ if (nce > 0) {
+ a = posix_memalign((void **) &syn1nce, 128,
+ (long long) vocab_size * layer1_size * sizeof(real));
+ if (syn1nce == NULL) {
+ printf("Memory allocation failed\n");
+ exit(1);
+ }
+ a = posix_memalign((void **) &syn1nce_window, 128,
+ (long long) vocab_size * window_layer_size * sizeof(real));
+ if (syn1nce_window == NULL) {
+ printf("Memory allocation failed\n");
+ exit(1);
+ }
+ a = posix_memalign((void **) &syn_hidden_word_nce, 128,
+ (long long) vocab_size * window_hidden_size * sizeof(real));
+ if (syn_hidden_word_nce == NULL) {
+ printf("Memory allocation failed\n");
+ exit(1);
+ }
+
+ for (a = 0; a < vocab_size; a++)
+ for (b = 0; b < layer1_size; b++)
+ syn1nce[a * layer1_size + b] = 0;
+ for (a = 0; a < vocab_size; a++)
+ for (b = 0; b < window_layer_size; b++)
+ syn1nce_window[a * window_layer_size + b] = 0;
+ for (a = 0; a < vocab_size; a++)
+ for (b = 0; b < window_hidden_size; b++)
+ syn_hidden_word_nce[a * window_hidden_size + b] = 0;
+ }
+ if (read_net_file[0] == 0) {
+ for (a = 0; a < vocab_size; a++)
+ for (b = 0; b < layer1_size; b++) {
+ next_random = next_random * (unsigned long long) 25214903917
+ + 11;
+ syn0[a * layer1_size + b] = (((next_random & 0xFFFF)
+ / (real) 65536) - 0.5) / layer1_size;
+ }
+
+ a = posix_memalign((void **) &syn_window_hidden, 128,
+ window_hidden_size * window_layer_size * sizeof(real));
+ if (syn_window_hidden == NULL) {
+ printf("Memory allocation failed\n");
+ exit(1);
+ }
+ for (a = 0; a < window_hidden_size * window_layer_size; a++) {
+ next_random = next_random * (unsigned long long) 25214903917 + 11;
+ syn_window_hidden[a] = (((next_random & 0xFFFF) / (real) 65536)
+ - 0.5) / (window_hidden_size * window_layer_size);
+ }
+ }
+ else {
+ FILE *fnet = fopen(read_net_file, "rb");
+ if (fnet == NULL) {
+ printf("Net parameter file not found\n");
+ exit(1);
+ }
+ for (a = 0; a < vocab_size; a++)
+ for (b = 0; b < layer1_size; b++) {
+ fread(&syn0[a * layer1_size + b], sizeof(real), 1, fnet);
+ }
+
+ a = posix_memalign((void **) &syn_window_hidden, 128,
+ window_hidden_size * window_layer_size * sizeof(real));
+ if (syn_window_hidden == NULL) {
+ printf("Memory allocation failed\n");
+ exit(1);
+ }
+ for (a = 0; a < window_hidden_size * window_layer_size; a++) {
+ fread(&syn_window_hidden[a],sizeof(real),1,fnet);
+ }
+ fclose(fnet);
+ }
+
+ CreateBinaryTree();
+}
+
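+// Worker thread: processes its 1/num_threads slice of the training file
+// for iter epochs; 'type' selects the architecture (0 cbow, 1 skip-gram,
+// 2 cwindow, 3 structured skip-gram, 4 senna).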
+void *TrainModelThread(void *id) {
+ long long a, b, d, cw, word, last_word, sentence_length = 0,
+ sentence_position = 0;
+ long long word_count = 0, last_word_count = 0, sen[MAX_SENTENCE_LENGTH + 1];
+ long long l1, l2, c, target, label, local_iter = iter;
+ unsigned long long next_random = (long long) id;
+ real f, g;
+ clock_t now;
+ int input_len_1 = layer1_size;
+ int window_offset = -1;
+ if (type == 2 || type == 4) {
+ input_len_1 = window_layer_size;
+ }
+ real *neu1 = (real *) calloc(input_len_1, sizeof(real));
+ real *neu1e = (real *) calloc(input_len_1, sizeof(real));
+
+ int input_len_2 = 0;
+ if (type == 4) {
+ input_len_2 = window_hidden_size;
+ }
+ real *neu2 = (real *) calloc(input_len_2, sizeof(real));
+ real *neu2e = (real *) calloc(input_len_2, sizeof(real));
+
+ FILE *fi = fopen(train_file, "rb");
+ fseek(fi, file_size / (long long) num_threads * (long long) id, SEEK_SET);
+ while (1) {
+ if (word_count - last_word_count > 10000) {
+ word_count_actual += word_count - last_word_count;
+ last_word_count = word_count;
+ if ((debug_mode > 1)) {
+ now = clock();
+ printf(
+ "%cAlpha: %f Progress: %.2f%% Words/thread/sec: %.2fk ",
+ 13, alpha,
+ word_count_actual / (real) (iter * train_words + 1)
+ * 100,
+ word_count_actual
+ / ((real) (now - start + 1)
+ / (real) CLOCKS_PER_SEC * 1000));
+ fflush(stdout);
+ }
+ alpha = starting_alpha
+ * (1 - word_count_actual / (real) (iter * train_words + 1));
+ if (alpha < starting_alpha * 0.0001)
+ alpha = starting_alpha * 0.0001;
+ }
+ if (sentence_length == 0) {
+ while (1) {
+ word = ReadWordIndex(fi);
+ if (feof(fi))
+ break;
+ if (word == -1)
+ continue;
+ word_count++;
+ if (word == 0)
+ break;
+ // The subsampling randomly discards frequent words while keeping the ranking same
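+				// Keep probability: (sqrt(cn / (sample * train_words)) + 1)
+				// * (sample * train_words) / cn, compared against a uniform draw.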
+ if (sample > 0) {
+ real ran = (sqrt(vocab[word].cn / (sample * train_words))
+ + 1) * (sample * train_words) / vocab[word].cn;
+ next_random = next_random * (unsigned long long) 25214903917
+ + 11;
+ if (ran < (next_random & 0xFFFF) / (real) 65536)
+ continue;
+ }
+ sen[sentence_length] = word;
+ sentence_length++;
+ if (sentence_length >= MAX_SENTENCE_LENGTH)
+ break;
+ }
+ sentence_position = 0;
+ }
+ if (feof(fi) || (word_count > train_words / num_threads)) {
+ word_count_actual += word_count - last_word_count;
+ local_iter--;
+ if (local_iter == 0)
+ break;
+ word_count = 0;
+ last_word_count = 0;
+ sentence_length = 0;
+ fseek(fi, file_size / (long long) num_threads * (long long) id,
+ SEEK_SET);
+ continue;
+ }
+ word = sen[sentence_position];
+ if (word == -1)
+ continue;
+ for (c = 0; c < input_len_1; c++)
+ neu1[c] = 0;
+ for (c = 0; c < input_len_1; c++)
+ neu1e[c] = 0;
+ for (c = 0; c < input_len_2; c++)
+ neu2[c] = 0;
+ for (c = 0; c < input_len_2; c++)
+ neu2e[c] = 0;
+ next_random = next_random * (unsigned long long) 25214903917 + 11;
+ b = next_random % window;
+ if (type == 0) { //train the cbow architecture
+ // in -> hidden
+ cw = 0;
+ for (a = b; a < window * 2 + 1 - b; a++)
+ if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0)
+ continue;
+ if (c >= sentence_length)
+ continue;
+ last_word = sen[c];
+ if (last_word == -1)
+ continue;
+ for (c = 0; c < layer1_size; c++)
+ neu1[c] += syn0[c + last_word * layer1_size];
+ cw++;
+ }
+ if (cw) {
+ for (c = 0; c < layer1_size; c++)
+ neu1[c] /= cw;
+ if (hs)
+ for (d = 0; d < vocab[word].codelen; d++) {
+ f = 0;
+ l2 = vocab[word].point[d] * layer1_size;
+ // Propagate hidden -> output
+ for (c = 0; c < layer1_size; c++)
+ f += neu1[c] * syn1[c + l2];
+ if (f <= -MAX_EXP)
+ continue;
+ else if (f >= MAX_EXP)
+ continue;
+ else
+ f = expTable[(int) ((f + MAX_EXP)
+ * (EXP_TABLE_SIZE / MAX_EXP / 2))];
+ // 'g' is the gradient multiplied by the learning rate
+ g = (1 - vocab[word].code[d] - f) * alpha;
+ // Propagate errors output -> hidden
+ for (c = 0; c < layer1_size; c++)
+ neu1e[c] += g * syn1[c + l2];
+ // Learn weights hidden -> output
+ for (c = 0; c < layer1_size; c++)
+ syn1[c + l2] += g * neu1[c];
+ if (cap == 1)
+ for (c = 0; c < layer1_size; c++)
+ capParam(syn1, c + l2);
+ }
+ // NEGATIVE SAMPLING
+ if (negative > 0)
+ for (d = 0; d < negative + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random
+ * (unsigned long long) 25214903917 + 11;
+ if (word_to_group != NULL
+ && word_to_group[word] != -1) {
+ target = word;
+ while (target == word) {
+ target = group_to_table[word_to_group[word]
+ * table_size
+ + (next_random >> 16) % table_size];
+ next_random = next_random
+ * (unsigned long long) 25214903917
+ + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ } else {
+ target =
+ table[(next_random >> 16) % table_size];
+ }
+ if (target == 0)
+ target = next_random % (vocab_size - 1) + 1;
+ if (target == word)
+ continue;
+ label = 0;
+ }
+ l2 = target * layer1_size;
+ f = 0;
+ for (c = 0; c < layer1_size; c++)
+ f += neu1[c] * syn1neg[c + l2];
+ if (f > MAX_EXP)
+ g = (label - 1) * alpha;
+ else if (f < -MAX_EXP)
+ g = (label - 0) * alpha;
+ else
+ g = (label
+ - expTable[(int) ((f + MAX_EXP)
+ * (EXP_TABLE_SIZE / MAX_EXP / 2))])
+ * alpha;
+ for (c = 0; c < layer1_size; c++)
+ neu1e[c] += g * syn1neg[c + l2];
+ for (c = 0; c < layer1_size; c++)
+ syn1neg[c + l2] += g * neu1[c];
+ if (cap == 1)
+ for (c = 0; c < layer1_size; c++)
+ capParam(syn1neg, c + l2);
+ }
+ // Noise Contrastive Estimation
+ if (nce > 0)
+ for (d = 0; d < nce + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random
+ * (unsigned long long) 25214903917 + 11;
+ if (word_to_group != NULL
+ && word_to_group[word] != -1) {
+ target = word;
+ while (target == word) {
+ target = group_to_table[word_to_group[word]
+ * table_size
+ + (next_random >> 16) % table_size];
+ next_random = next_random
+ * (unsigned long long) 25214903917
+ + 11;
+ }
+ } else {
+ target =
+ table[(next_random >> 16) % table_size];
+ }
+ if (target == 0)
+ target = next_random % (vocab_size - 1) + 1;
+ if (target == word)
+ continue;
+ label = 0;
+ }
+ l2 = target * layer1_size;
+ f = 0;
+
+ for (c = 0; c < layer1_size; c++)
+ f += neu1[c] * syn1nce[c + l2];
+ if (f > MAX_EXP)
+ g = (label - 1) * alpha;
+ else if (f < -MAX_EXP)
+ g = (label - 0) * alpha;
+ else {
+ f = exp(f);
+ g =
+ (label
+ - f
+ / (noise_distribution[target]
+ * nce + f)) * alpha;
+ }
+ for (c = 0; c < layer1_size; c++)
+ neu1e[c] += g * syn1nce[c + l2];
+ for (c = 0; c < layer1_size; c++)
+ syn1nce[c + l2] += g * neu1[c];
+ if (cap == 1)
+ for (c = 0; c < layer1_size; c++)
+ capParam(syn1nce, c + l2);
+ }
+ // hidden -> in
+ for (a = b; a < window * 2 + 1 - b; a++)
+ if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0)
+ continue;
+ if (c >= sentence_length)
+ continue;
+ last_word = sen[c];
+ if (last_word == -1)
+ continue;
+ for (c = 0; c < layer1_size; c++)
+ syn0[c + last_word * layer1_size] += neu1e[c];
+ }
+ }
+ } else if (type == 1) { //train skip-gram
+ for (a = b; a < window * 2 + 1 - b; a++)
+ if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0)
+ continue;
+ if (c >= sentence_length)
+ continue;
+ last_word = sen[c];
+ if (last_word == -1)
+ continue;
+ l1 = last_word * layer1_size;
+ for (c = 0; c < layer1_size; c++)
+ neu1e[c] = 0;
+ // HIERARCHICAL SOFTMAX
+ if (hs)
+ for (d = 0; d < vocab[word].codelen; d++) {
+ f = 0;
+ l2 = vocab[word].point[d] * layer1_size;
+ // Propagate hidden -> output
+ for (c = 0; c < layer1_size; c++)
+ f += syn0[c + l1] * syn1[c + l2];
+ if (f <= -MAX_EXP)
+ continue;
+ else if (f >= MAX_EXP)
+ continue;
+ else
+ f = expTable[(int) ((f + MAX_EXP)
+ * (EXP_TABLE_SIZE / MAX_EXP / 2))];
+ // 'g' is the gradient multiplied by the learning rate
+ g = (1 - vocab[word].code[d] - f) * alpha;
+ // Propagate errors output -> hidden
+ for (c = 0; c < layer1_size; c++)
+ neu1e[c] += g * syn1[c + l2];
+ // Learn weights hidden -> output
+ for (c = 0; c < layer1_size; c++)
+ syn1[c + l2] += g * syn0[c + l1];
+ if (cap == 1)
+ for (c = 0; c < layer1_size; c++)
+ capParam(syn1, c + l2);
+ }
+ // NEGATIVE SAMPLING
+ if (negative > 0)
+ for (d = 0; d < negative + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random
+ * (unsigned long long) 25214903917 + 11;
+ if (word_to_group != NULL
+ && word_to_group[word] != -1) {
+ target = word;
+ while (target == word) {
+ target =
+ group_to_table[word_to_group[word]
+ * table_size
+ + (next_random >> 16)
+ % table_size];
+ next_random =
+ next_random
+ * (unsigned long long) 25214903917
+ + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ } else {
+ target = table[(next_random >> 16)
+ % table_size];
+ }
+ if (target == 0)
+ target = next_random % (vocab_size - 1) + 1;
+ if (target == word)
+ continue;
+ label = 0;
+ }
+ l2 = target * layer1_size;
+ f = 0;
+ for (c = 0; c < layer1_size; c++)
+ f += syn0[c + l1] * syn1neg[c + l2];
+ if (f > MAX_EXP)
+ g = (label - 1) * alpha;
+ else if (f < -MAX_EXP)
+ g = (label - 0) * alpha;
+ else
+ g =
+ (label
+ - expTable[(int) ((f + MAX_EXP)
+ * (EXP_TABLE_SIZE
+ / MAX_EXP / 2))])
+ * alpha;
+ for (c = 0; c < layer1_size; c++)
+ neu1e[c] += g * syn1neg[c + l2];
+ for (c = 0; c < layer1_size; c++)
+ syn1neg[c + l2] += g * syn0[c + l1];
+ if (cap == 1)
+ for (c = 0; c < layer1_size; c++)
+ capParam(syn1neg, c + l2);
+ }
+ //Noise Contrastive Estimation
+ if (nce > 0)
+ for (d = 0; d < nce + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random
+ * (unsigned long long) 25214903917 + 11;
+ if (word_to_group != NULL
+ && word_to_group[word] != -1) {
+ target = word;
+ while (target == word) {
+ target =
+ group_to_table[word_to_group[word]
+ * table_size
+ + (next_random >> 16)
+ % table_size];
+ next_random =
+ next_random
+ * (unsigned long long) 25214903917
+ + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ } else {
+ target = table[(next_random >> 16)
+ % table_size];
+ }
+ if (target == 0)
+ target = next_random % (vocab_size - 1) + 1;
+ if (target == word)
+ continue;
+ label = 0;
+ }
+ l2 = target * layer1_size;
+ f = 0;
+ for (c = 0; c < layer1_size; c++)
+ f += syn0[c + l1] * syn1nce[c + l2];
+ if (f > MAX_EXP)
+ g = (label - 1) * alpha;
+ else if (f < -MAX_EXP)
+ g = (label - 0) * alpha;
+ else {
+ f = exp(f);
+ g = (label
+ - f
+ / (noise_distribution[target]
+ * nce + f)) * alpha;
+ }
+ for (c = 0; c < layer1_size; c++)
+ neu1e[c] += g * syn1nce[c + l2];
+ for (c = 0; c < layer1_size; c++)
+ syn1nce[c + l2] += g * syn0[c + l1];
+ if (cap == 1)
+ for (c = 0; c < layer1_size; c++)
+ capParam(syn1nce, c + l2);
+ }
+ // Learn weights input -> hidden
+ for (c = 0; c < layer1_size; c++)
+ syn0[c + l1] += neu1e[c];
+ }
+ } else if (type == 2) { //train the cwindow architecture
+ // in -> hidden
+ cw = 0;
+ for (a = 0; a < window * 2 + 1; a++)
+ if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0)
+ continue;
+ if (c >= sentence_length)
+ continue;
+ last_word = sen[c];
+ if (last_word == -1)
+ continue;
+ window_offset = a * layer1_size;
+ if (a > window)
+ window_offset -= layer1_size;
+ for (c = 0; c < layer1_size; c++)
+ neu1[c + window_offset] += syn0[c
+ + last_word * layer1_size];
+ cw++;
+ }
+ if (cw) {
+ if (hs)
+ for (d = 0; d < vocab[word].codelen; d++) {
+ f = 0;
+ l2 = vocab[word].point[d] * window_layer_size;
+ // Propagate hidden -> output
+ for (c = 0; c < window_layer_size; c++)
+ f += neu1[c] * syn1_window[c + l2];
+ if (f <= -MAX_EXP)
+ continue;
+ else if (f >= MAX_EXP)
+ continue;
+ else
+ f = expTable[(int) ((f + MAX_EXP)
+ * (EXP_TABLE_SIZE / MAX_EXP / 2))];
+ // 'g' is the gradient multiplied by the learning rate
+ g = (1 - vocab[word].code[d] - f) * alpha;
+ // Propagate errors output -> hidden
+ for (c = 0; c < window_layer_size; c++)
+ neu1e[c] += g * syn1_window[c + l2];
+ // Learn weights hidden -> output
+ for (c = 0; c < window_layer_size; c++)
+ syn1_window[c + l2] += g * neu1[c];
+ if (cap == 1)
+ for (c = 0; c < window_layer_size; c++)
+ capParam(syn1_window, c + l2);
+ }
+ // NEGATIVE SAMPLING
+ if (negative > 0)
+ for (d = 0; d < negative + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random
+ * (unsigned long long) 25214903917 + 11;
+ if (word_to_group != NULL
+ && word_to_group[word] != -1) {
+ target = word;
+ while (target == word) {
+ target = group_to_table[word_to_group[word]
+ * table_size
+ + (next_random >> 16) % table_size];
+ next_random = next_random
+ * (unsigned long long) 25214903917
+ + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ } else {
+ target =
+ table[(next_random >> 16) % table_size];
+ }
+ if (target == 0)
+ target = next_random % (vocab_size - 1) + 1;
+ if (target == word)
+ continue;
+ label = 0;
+ }
+ l2 = target * window_layer_size;
+ f = 0;
+ for (c = 0; c < window_layer_size; c++)
+ f += neu1[c] * syn1neg_window[c + l2];
+ if (f > MAX_EXP)
+ g = (label - 1) * alpha;
+ else if (f < -MAX_EXP)
+ g = (label - 0) * alpha;
+ else
+ g = (label
+ - expTable[(int) ((f + MAX_EXP)
+ * (EXP_TABLE_SIZE / MAX_EXP / 2))])
+ * alpha;
+ for (c = 0; c < window_layer_size; c++)
+ neu1e[c] += g * syn1neg_window[c + l2];
+ for (c = 0; c < window_layer_size; c++)
+ syn1neg_window[c + l2] += g * neu1[c];
+ if (cap == 1)
+ for (c = 0; c < window_layer_size; c++)
+ capParam(syn1neg_window, c + l2);
+ }
+ // Noise Contrastive Estimation
+ if (nce > 0)
+ for (d = 0; d < nce + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random
+ * (unsigned long long) 25214903917 + 11;
+ if (word_to_group != NULL
+ && word_to_group[word] != -1) {
+ target = word;
+ while (target == word) {
+ target = group_to_table[word_to_group[word]
+ * table_size
+ + (next_random >> 16) % table_size];
+ next_random = next_random
+ * (unsigned long long) 25214903917
+ + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ } else {
+ target =
+ table[(next_random >> 16) % table_size];
+ }
+ if (target == 0)
+ target = next_random % (vocab_size - 1) + 1;
+ if (target == word)
+ continue;
+ label = 0;
+ }
+ l2 = target * window_layer_size;
+ f = 0;
+ for (c = 0; c < window_layer_size; c++)
+ f += neu1[c] * syn1nce_window[c + l2];
+ if (f > MAX_EXP)
+ g = (label - 1) * alpha;
+ else if (f < -MAX_EXP)
+ g = (label - 0) * alpha;
+ else {
+ f = exp(f);
+ g =
+ (label
+ - f
+ / (noise_distribution[target]
+ * nce + f)) * alpha;
+ }
+ for (c = 0; c < window_layer_size; c++)
+ neu1e[c] += g * syn1nce_window[c + l2];
+ for (c = 0; c < window_layer_size; c++)
+ syn1nce_window[c + l2] += g * neu1[c];
+ if (cap == 1)
+ for (c = 0; c < window_layer_size; c++)
+ capParam(syn1nce_window, c + l2);
+ }
+ // hidden -> in
+ for (a = 0; a < window * 2 + 1; a++)
+ if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0)
+ continue;
+ if (c >= sentence_length)
+ continue;
+ last_word = sen[c];
+ if (last_word == -1)
+ continue;
+ window_offset = a * layer1_size;
+ if (a > window)
+ window_offset -= layer1_size;
+ for (c = 0; c < layer1_size; c++)
+ syn0[c + last_word * layer1_size] += neu1e[c
+ + window_offset];
+ }
+ }
+ } else if (type == 3) { //train structured skip-gram
+ for (a = 0; a < window * 2 + 1; a++)
+ if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0)
+ continue;
+ if (c >= sentence_length)
+ continue;
+ last_word = sen[c];
+ if (last_word == -1)
+ continue;
+ l1 = last_word * layer1_size;
+ window_offset = a * layer1_size;
+ if (a > window)
+ window_offset -= layer1_size;
+ for (c = 0; c < layer1_size; c++)
+ neu1e[c] = 0;
+ // HIERARCHICAL SOFTMAX
+ if (hs)
+ for (d = 0; d < vocab[word].codelen; d++) {
+ f = 0;
+ l2 = vocab[word].point[d] * window_layer_size;
+ // Propagate hidden -> output
+ for (c = 0; c < layer1_size; c++)
+ f += syn0[c + l1]
+ * syn1_window[c + l2 + window_offset];
+ if (f <= -MAX_EXP)
+ continue;
+ else if (f >= MAX_EXP)
+ continue;
+ else
+ f = expTable[(int) ((f + MAX_EXP)
+ * (EXP_TABLE_SIZE / MAX_EXP / 2))];
+ // 'g' is the gradient multiplied by the learning rate
+ g = (1 - vocab[word].code[d] - f) * alpha;
+ // Propagate errors output -> hidden
+ for (c = 0; c < layer1_size; c++)
+ neu1e[c] += g
+ * syn1_window[c + l2 + window_offset];
+ // Learn weights hidden -> output
+ for (c = 0; c < layer1_size; c++)
+							syn1_window[c + l2 + window_offset] += g
+ * syn0[c + l1];
+ if (cap == 1)
+ for (c = 0; c < layer1_size; c++)
+								capParam(syn1_window, c + l2 + window_offset);
+ }
+ // NEGATIVE SAMPLING
+ if (negative > 0)
+ for (d = 0; d < negative + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random
+ * (unsigned long long) 25214903917 + 11;
+ if (word_to_group != NULL
+ && word_to_group[word] != -1) {
+ target = word;
+ while (target == word) {
+ target =
+ group_to_table[word_to_group[word]
+ * table_size
+ + (next_random >> 16)
+ % table_size];
+ next_random =
+ next_random
+ * (unsigned long long) 25214903917
+ + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ } else {
+ target = table[(next_random >> 16)
+ % table_size];
+ }
+ if (target == 0)
+ target = next_random % (vocab_size - 1) + 1;
+ if (target == word)
+ continue;
+ label = 0;
+ }
+ l2 = target * window_layer_size;
+ f = 0;
+ for (c = 0; c < layer1_size; c++)
+ f +=
+ syn0[c + l1]
+ * syn1neg_window[c + l2
+ + window_offset];
+ if (f > MAX_EXP)
+ g = (label - 1) * alpha;
+ else if (f < -MAX_EXP)
+ g = (label - 0) * alpha;
+ else
+ g =
+ (label
+ - expTable[(int) ((f + MAX_EXP)
+ * (EXP_TABLE_SIZE
+ / MAX_EXP / 2))])
+ * alpha;
+ for (c = 0; c < layer1_size; c++)
+ neu1e[c] +=
+ g
+ * syn1neg_window[c + l2
+ + window_offset];
+ for (c = 0; c < layer1_size; c++)
+ syn1neg_window[c + l2 + window_offset] += g
+ * syn0[c + l1];
+ if (cap == 1)
+ for (c = 0; c < layer1_size; c++)
+ capParam(syn1neg_window,
+ c + l2 + window_offset);
+ }
+					// Noise Contrastive Estimation
+ if (nce > 0)
+ for (d = 0; d < nce + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random
+ * (unsigned long long) 25214903917 + 11;
+ if (word_to_group != NULL
+ && word_to_group[word] != -1) {
+ target = word;
+ while (target == word) {
+ target =
+ group_to_table[word_to_group[word]
+ * table_size
+ + (next_random >> 16)
+ % table_size];
+ next_random =
+ next_random
+ * (unsigned long long) 25214903917
+ + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ } else {
+ target = table[(next_random >> 16)
+ % table_size];
+ }
+ if (target == 0)
+ target = next_random % (vocab_size - 1) + 1;
+ if (target == word)
+ continue;
+ label = 0;
+ }
+ l2 = target * window_layer_size;
+ f = 0;
+ for (c = 0; c < layer1_size; c++)
+ f +=
+ syn0[c + l1]
+ * syn1nce_window[c + l2
+ + window_offset];
+ if (f > MAX_EXP)
+ g = (label - 1) * alpha;
+ else if (f < -MAX_EXP)
+ g = (label - 0) * alpha;
+ else {
+ f = exp(f);
+ g = (label
+ - f
+ / (noise_distribution[target]
+ * nce + f)) * alpha;
+ }
+ for (c = 0; c < layer1_size; c++)
+ neu1e[c] +=
+ g
+ * syn1nce_window[c + l2
+ + window_offset];
+ for (c = 0; c < layer1_size; c++)
+ syn1nce_window[c + l2 + window_offset] += g
+ * syn0[c + l1];
+ if (cap == 1)
+ for (c = 0; c < layer1_size; c++)
+ capParam(syn1nce_window,
+ c + l2 + window_offset);
+ }
+ // Learn weights input -> hidden
+ for (c = 0; c < layer1_size; c++) {
+ syn0[c + l1] += neu1e[c];
+ if (syn0[c + l1] > 50)
+ syn0[c + l1] = 50;
+ if (syn0[c + l1] < -50)
+ syn0[c + l1] = -50;
+ }
+ }
+ } else if (type == 4) { //training senna
+ // in -> hidden
+ cw = 0;
+ for (a = 0; a < window * 2 + 1; a++) if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0) continue;
+ if (c >= sentence_length) continue;
+ last_word = sen[c];
+ if (last_word == -1) continue;
+ window_offset = a * layer1_size;
+ if (a > window) window_offset -= layer1_size;
+ for (c = 0; c < layer1_size; c++) neu1[c + window_offset] += syn0[c + last_word * layer1_size];
+ cw++;
+ }
+ if (cw) {
+ for (a = 0; a < window_hidden_size; a++) {
+ c = a * window_layer_size;
+ for (b = 0; b < window_layer_size; b++) {
+ neu2[a] += syn_window_hidden[c + b] * neu1[b];
+ }
+ }
+ if (hs) for (d = 0; d < vocab[word].codelen; d++) {
+ f = 0;
+ l2 = vocab[word].point[d] * window_hidden_size;
+ // Propagate hidden -> output
+ for (c = 0; c < window_hidden_size; c++) f += hardTanh(neu2[c]) * syn_hidden_word[c + l2];
+ if (f <= -MAX_EXP) continue;
+ else if (f >= MAX_EXP) continue;
+ else f = expTable[(int) ((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
+ // 'g' is the gradient multiplied by the learning rate
+ g = (1 - vocab[word].code[d] - f) * alpha;
+ // Propagate errors output -> hidden
+ for (c = 0; c < window_hidden_size; c++) neu2e[c] += dHardTanh(neu2[c], g) * g * syn_hidden_word[c + l2];
+ // Learn weights hidden -> output
+ for (c = 0; c < window_hidden_size; c++) syn_hidden_word[c + l2] += dHardTanh(neu2[c], g) * g * neu2[c];
+ }
+ // NEGATIVE SAMPLING
+ if (negative > 0) for (d = 0; d < negative + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random * (unsigned long long) 25214903917 + 11;
+ if (word_to_group != NULL && word_to_group[word] != -1) {
+ target = word;
+ while (target == word) {
+ target = group_to_table[word_to_group[word] * table_size + (next_random >> 16) % table_size];
+ next_random = next_random * (unsigned long long) 25214903917 + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ } else {
+ target = table[(next_random >> 16) % table_size];
+ }
+ if (target == 0) target = next_random % (vocab_size - 1) + 1;
+ if (target == word) continue;
+ label = 0;
+ }
+ l2 = target * window_hidden_size;
+ f = 0;
+ for (c = 0; c < window_hidden_size; c++) f += hardTanh(neu2[c]) * syn_hidden_word_neg[c + l2];
+ if (f > MAX_EXP) g = (label - 1) * alpha / negative;
+ else if (f < -MAX_EXP) g = (label - 0) * alpha / negative;
+ else g = (label - expTable[(int) ((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]) * alpha / negative;
+ for (c = 0; c < window_hidden_size; c++) neu2e[c] += dHardTanh(neu2[c], g) * g * syn_hidden_word_neg[c + l2];
+ for (c = 0; c < window_hidden_size; c++) syn_hidden_word_neg[c + l2] += dHardTanh(neu2[c], g) * g * neu2[c];
+ }
+ for (a = 0; a < window_hidden_size; a++)
+ for (b = 0; b < window_layer_size; b++)
+ neu1e[b] += neu2e[a] * syn_window_hidden[a * window_layer_size + b];
+ for (a = 0; a < window_hidden_size; a++)
+ for (b = 0; b < window_layer_size; b++)
+ syn_window_hidden[a * window_layer_size + b] += neu2e[a] * neu1[b];
+ // hidden -> in
+ for (a = 0; a < window * 2 + 1; a++) if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0) continue;
+ if (c >= sentence_length) continue;
+ last_word = sen[c];
+ if (last_word == -1) continue;
+ window_offset = a * layer1_size;
+ if (a > window) window_offset -= layer1_size;
+ for (c = 0; c < layer1_size; c++) syn0[c + last_word * layer1_size] += neu1e[c + window_offset];
+ }
+ }
+ } else {
+ printf("unknown type %i\n", type);
+ exit(1);
+ }
+ sentence_position++;
+ if (sentence_position >= sentence_length) {
+ sentence_length = 0;
+ continue;
+ }
+ }
+ fclose(fi);
+ free(neu1);
+ free(neu1e);
+ pthread_exit(NULL);
+}
+
+void TrainModel() {
+ long a, b, c, d;
+ FILE *fo;
+ pthread_t *pt = (pthread_t *) malloc(num_threads * sizeof(pthread_t));
+ printf("Starting training using file %s\n", train_file);
+ starting_alpha = alpha;
+ if (read_vocab_file[0] != 0)
+ ReadVocab();
+ else
+ LearnVocabFromTrainFile();
+ if (save_vocab_file[0] != 0)
+ SaveVocab();
+ if (output_file[0] == 0)
+ return;
+ InitNet();
+ if (negative > 0 || nce > 0)
+ InitUnigramTable();
+ if (negative_classes_file[0] != 0)
+ InitClassUnigramTable();
+ start = clock();
+ for (a = 0; a < num_threads; a++)
+ pthread_create(&pt[a], NULL, TrainModelThread, (void *) a);
+ for (a = 0; a < num_threads; a++)
+ pthread_join(pt[a], NULL);
+ fo = fopen(output_file, "wb");
+ if (classes == 0) {
+ // Save the word vectors
+ fprintf(fo, "%lld %lld\n", vocab_size, layer1_size);
+ for (a = 0; a < vocab_size; a++) {
+ fprintf(fo, "%s ", vocab[a].word);
+ if (binary)
+ for (b = 0; b < layer1_size; b++)
+ fwrite(&syn0[a * layer1_size + b], sizeof(real), 1, fo);
+ else
+ for (b = 0; b < layer1_size; b++)
+ fprintf(fo, "%lf ", syn0[a * layer1_size + b]);
+ fprintf(fo, "\n");
+ }
+ } else {
+ // Run K-means on the word vectors
+ int clcn = classes, iter = 10, closeid;
+ int *centcn = (int *) malloc(classes * sizeof(int));
+ int *cl = (int *) calloc(vocab_size, sizeof(int));
+ real closev, x;
+ real *cent = (real *) calloc(classes * layer1_size, sizeof(real));
+ for (a = 0; a < vocab_size; a++)
+ cl[a] = a % clcn;
+ for (a = 0; a < iter; a++) {
+ for (b = 0; b < clcn * layer1_size; b++)
+ cent[b] = 0;
+ for (b = 0; b < clcn; b++)
+ centcn[b] = 1;
+ for (c = 0; c < vocab_size; c++) {
+ for (d = 0; d < layer1_size; d++)
+ cent[layer1_size * cl[c] + d] += syn0[c * layer1_size + d];
+ centcn[cl[c]]++;
+ }
+ for (b = 0; b < clcn; b++) {
+ closev = 0;
+ for (c = 0; c < layer1_size; c++) {
+ cent[layer1_size * b + c] /= centcn[b];
+ closev += cent[layer1_size * b + c]
+ * cent[layer1_size * b + c];
+ }
+ closev = sqrt(closev);
+ for (c = 0; c < layer1_size; c++)
+ cent[layer1_size * b + c] /= closev;
+ }
+ for (c = 0; c < vocab_size; c++) {
+ closev = -10;
+ closeid = 0;
+ for (d = 0; d < clcn; d++) {
+ x = 0;
+ for (b = 0; b < layer1_size; b++)
+ x += cent[layer1_size * d + b]
+ * syn0[c * layer1_size + b];
+ if (x > closev) {
+ closev = x;
+ closeid = d;
+ }
+ }
+ cl[c] = closeid;
+ }
+ }
+ // Save the K-means classes
+ for (a = 0; a < vocab_size; a++)
+ fprintf(fo, "%s %d\n", vocab[a].word, cl[a]);
+ free(centcn);
+ free(cent);
+ free(cl);
+ }
+ fclose(fo);
+ if (save_net_file[0] != 0)
+ SaveNet();
+}
+
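+// Note (editor): a minimal sketch of how the non-binary output written above
+// can be read back. `LoadTextVectorsHeader` is a hypothetical helper, not part
+// of the original tool; it assumes the "%lld %lld\n" header followed by one
+// "word v1 ... vN" line per word.
+static int LoadTextVectorsHeader(const char *path, long long *words, long long *size) {
+ FILE *f = fopen(path, "r");
+ if (f == NULL) return -1;
+ if (fscanf(f, "%lld %lld", words, size) != 2) { fclose(f); return -1; }
+ // The caller would then read *words lines, each a token plus *size floats.
+ fclose(f);
+ return 0;
+}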
+int ArgPos(char *str, int argc, char **argv) {
+ int a;
+ for (a = 1; a < argc; a++)
+ if (!strcmp(str, argv[a])) {
+ if (a == argc - 1) {
+ printf("Argument missing for %s\n", str);
+ exit(1);
+ }
+ return a;
+ }
+ return -1;
+}
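+// Note (editor): ArgPos returns the index of the flag itself, so a flag's
+// value is always read from argv[i + 1], e.g.
+// if ((i = ArgPos((char *) "-size", argc, argv)) > 0) layer1_size = atoi(argv[i + 1]);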
+
+int main(int argc, char **argv) {
+ int i;
+ if (argc == 1) {
+ printf("WORD VECTOR estimation toolkit v 0.1c\n\n");
+ printf("Options:\n");
+ printf("Parameters for training:\n");
+ printf("\t-train <file>\n");
+ printf("\t\tUse text data from <file> to train the model\n");
+ printf("\t-output <file>\n");
+ printf("\t\tUse <file> to save the resulting word vectors / word clusters\n");
+ printf("\t-size <int>\n");
+ printf("\t\tSet size of word vectors; default is 100\n");
+ printf("\t-window <int>\n");
+ printf("\t\tSet max skip length between words; default is 5\n");
+ printf("\t-sample <float>\n");
+ printf("\t\tSet threshold for occurrence of words. Those that appear with higher frequency in the training data\n");
+ printf("\t\twill be randomly down-sampled; default is 1e-3, useful range is (0, 1e-5)\n");
+ printf("\t-hs <int>\n");
+ printf("\t\tUse Hierarchical Softmax; default is 0 (not used)\n");
+ printf("\t-negative <int>\n");
+ printf("\t\tNumber of negative examples; default is 5, common values are 3 - 10 (0 = not used)\n");
+ printf("\t-negative-classes <file>\n");
+ printf("\t\tNegative classes to sample from\n");
+ printf("\t-nce <int>\n");
+ printf("\t\tNumber of negative examples for nce; default is 0, common values are 3 - 10 (0 = not used)\n");
+ printf("\t-threads <int>\n");
+ printf("\t\tUse <int> threads (default 12)\n");
+ printf("\t-iter <int>\n");
+ printf("\t\tRun more training iterations (default 5)\n");
+ printf("\t-min-count <int>\n");
+ printf("\t\tThis will discard words that appear less than <int> times; default is 5\n");
+ printf("\t-alpha <float>\n");
+ printf("\t\tSet the starting learning rate; default is 0.025 for skip-gram and 0.05 for CBOW\n");
+ printf("\t-classes <int>\n");
+ printf("\t\tOutput word classes rather than word vectors; default number of classes is 0 (vectors are written)\n");
+ printf("\t-debug <int>\n");
+ printf("\t\tSet the debug mode (default = 2 = more info during training)\n");
+ printf("\t-binary <int>\n");
+ printf("\t\tSave the resulting vectors in binary mode; default is 0 (off)\n");
+ printf("\t-save-vocab <file>\n");
+ printf("\t\tThe vocabulary will be saved to <file>\n");
+ printf("\t-read-vocab <file>\n");
+ printf("\t\tThe vocabulary will be read from <file>, not constructed from the training data\n");
+ printf("\t-read-net <file>\n");
+ printf("\t\tThe net parameters will be read from <file>, not initialized randomly\n");
+ printf("\t-save-net <file>\n");
+ printf("\t\tThe net parameters will be saved to <file>\n");
+ printf("\t-type <int>\n");
+ printf("\t\tType of embeddings (0 for cbow, 1 for skip-gram, 2 for cwindow, 3 for structured skip-gram, 4 for senna type)\n");
+ printf("\t-cap <int>\n");
+ printf("\t\tLimit the parameter values to the range [-50, 50]; default is 0 (off)\n");
+ printf("\nExamples:\n");
+ printf("./word2vec -train data.txt -output vec.txt -size 200 -window 5 -sample 1e-4 -negative 5 -hs 0 -binary 0 -type 1 -iter 3\n\n");
+ return 0;
+ }
+ output_file[0] = 0;
+ save_vocab_file[0] = 0;
+ read_vocab_file[0] = 0;
+ save_net_file[0] = 0;
+ read_net_file[0] = 0;
+ negative_classes_file[0] = 0;
+ if ((i = ArgPos((char *) "-size", argc, argv)) > 0)
+ layer1_size = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *) "-train", argc, argv)) > 0)
+ strcpy(train_file, argv[i + 1]);
+ if ((i = ArgPos((char *) "-save-vocab", argc, argv)) > 0)
+ strcpy(save_vocab_file, argv[i + 1]);
+ if ((i = ArgPos((char *) "-read-vocab", argc, argv)) > 0)
+ strcpy(read_vocab_file, argv[i + 1]);
+ if ((i = ArgPos((char *) "-save-net", argc, argv)) > 0)
+ strcpy(save_net_file, argv[i + 1]);
+ if ((i = ArgPos((char *) "-read-net", argc, argv)) > 0)
+ strcpy(read_net_file, argv[i + 1]);
+ if ((i = ArgPos((char *) "-debug", argc, argv)) > 0)
+ debug_mode = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *) "-binary", argc, argv)) > 0)
+ binary = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *) "-type", argc, argv)) > 0)
+ type = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *) "-output", argc, argv)) > 0)
+ strcpy(output_file, argv[i + 1]);
+ if ((i = ArgPos((char *) "-window", argc, argv)) > 0)
+ window = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *) "-sample", argc, argv)) > 0)
+ sample = atof(argv[i + 1]);
+ if ((i = ArgPos((char *) "-hs", argc, argv)) > 0)
+ hs = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *) "-negative", argc, argv)) > 0)
+ negative = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *) "-negative-classes", argc, argv)) > 0)
+ strcpy(negative_classes_file, argv[i + 1]);
+ if ((i = ArgPos((char *) "-nce", argc, argv)) > 0)
+ nce = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *) "-threads", argc, argv)) > 0)
+ num_threads = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *) "-iter", argc, argv)) > 0)
+ iter = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *) "-min-count", argc, argv)) > 0)
+ min_count = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *) "-classes", argc, argv)) > 0)
+ classes = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *) "-cap", argc, argv)) > 0)
+ cap = atoi(argv[i + 1]);
+ if (type == 0 || type == 2 || type == 4)
+ alpha = 0.05;
+ if ((i = ArgPos((char *) "-alpha", argc, argv)) > 0)
+ alpha = atof(argv[i + 1]);
+ vocab = (struct vocab_word *) calloc(vocab_max_size,
+ sizeof(struct vocab_word));
+ vocab_hash = (int *) calloc(vocab_hash_size, sizeof(int));
+ expTable = (real *) malloc((EXP_TABLE_SIZE + 1) * sizeof(real));
+ for (i = 0; i < EXP_TABLE_SIZE; i++) {
+ expTable[i] = exp((i / (real) EXP_TABLE_SIZE * 2 - 1) * MAX_EXP); // Precompute the exp() table
+ expTable[i] = expTable[i] / (expTable[i] + 1); // Precompute sigmoid: f(x) = e^x / (e^x + 1)
+ }
+ TrainModel();
+ return 0;
+}
+
diff --git a/wordless2vec.c b/wordless2vec.c
new file mode 100644
index 0000000..e68bbee
--- /dev/null
+++ b/wordless2vec.c
@@ -0,0 +1,1696 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+#include <pthread.h>
+
+#define MAX_STRING 100
+#define EXP_TABLE_SIZE 1000
+#define MAX_EXP 6
+#define MAX_SENTENCE_LENGTH 1000
+#define MAX_CODE_LENGTH 40
+
+const int vocab_hash_size = 30000000; // Maximum 30 * 0.7 = 21M words in the vocabulary
+
+typedef float real; // Precision of float numbers
+
+struct vocab_word {
+ long long cn;
+ int *point;
+ char *word, *code, codelen;
+};
+
+char train_file[MAX_STRING], output_file[MAX_STRING];
+char save_vocab_file[MAX_STRING], read_vocab_file[MAX_STRING];
+struct vocab_word *vocab;
+int binary = 0, type = 1, debug_mode = 2, window = 5, min_count = 5, num_threads = 12, min_reduce = 1;
+int *vocab_hash;
+long long vocab_max_size = 1000, vocab_size = 0, layer1_size = 100;
+long long train_words = 0, word_count_actual = 0, iter = 5, file_size = 0, classes = 0;
+real alpha = 0.025, starting_alpha, sample = 1e-3;
+real *syn0, *syn1, *syn1neg, *expTable, *tanhTable;
+clock_t start;
+
+real *syn1_window, *syn1neg_window;
+int window_offset, window_layer_size;
+
+int window_hidden_size = 500;
+real *syn_window_hidden, *syn_hidden_word, *syn_hidden_word_neg;
+
+int hs = 0, negative = 5;
+const int table_size = 1e8;
+int *table;
+
+//contrastive negative sampling
+char negative_classes_file[MAX_STRING];
+int *word_to_group;
+int *group_to_table; //group_size*table_size
+int class_number;
+
+//char table
+int rep = 0;
+#define C_MAX_CODE 65536
+int c_state_size = 5;
+int c_cell_size = 5;
+int c_proj_size = 3;
+int c_params_number;
+int c_lstm_params_number;
+real *c_lookup;
+
+//char lstm params
+real *f_init_state;
+real *f_init_cell;
+real *b_init_state;
+real *b_init_cell;
+real *f_b_params;
+
+//short term memory
+real*syn0_initial;
+real*syn0_in_memory;
+
+int batch_size = 100;
+
+void printStates(real*states, int start){
+ int s;
+ printf("igate ");
+ for(s = 0; s < c_state_size; s++){ printf("%f ", states[start++]);} printf("\n");
+ printf("fgate ");
+ for(s = 0; s < c_state_size; s++){ printf("%f ", states[start++]);} printf("\n");
+ printf("c + tanh ");
+ for(s = 0; s < c_state_size; s++){ printf("%f ", states[start++]);} printf("\n");
+ printf("cgate ");
+ for(s = 0; s < c_state_size; s++){ printf("%f ", states[start++]);} printf("\n");
+ printf("ogate ");
+ for(s = 0; s < c_state_size; s++){ printf("%f ", states[start++]);} printf("\n");
+ printf("cgate + tanh ");
+ for(s = 0; s < c_state_size; s++){ printf("%f ", states[start++]);} printf("\n");
+ printf("state ");
+ for(s = 0; s < c_state_size; s++){ printf("%f ", states[start++]);} printf("\n");
+
+}
+
+void lstmForwardBlock(real *chars, int char_start, real*states, int next_start, int p){
+ int i,s,si,sf,sc,sct,sctt,so,s1=next_start;
+ int prev_cell_start = s1 - c_state_size*4;
+ int prev_state_start = s1 - c_state_size;
+ if(states[prev_cell_start]==0){
+// printf("crap! cell is zero\n");
+ }
+ if(states[prev_state_start]==0){
+// printf("crap! state is zero\n");
+ }
+ if(states[s1]!=0){
+// printf("crap! start not zero\n");
+ }
+ //igate
+ si = s1;
+ for(s = 0; s < c_state_size; s++){
+ for(i = 0; i < c_proj_size; i++){
+ states[s1]+=chars[char_start+i]*f_b_params[p++];
+ }
+ for(i = 0; i < c_cell_size; i++){
+ states[s1]+=states[prev_cell_start+i]*f_b_params[p++];
+ }
+ for(i = 0; i < c_state_size; i++){
+ states[s1]+=states[prev_state_start+i]*f_b_params[p++];
+ }
+ states[s1]+=f_b_params[p++];
+ if(states[s1]>MAX_EXP){
+ states[s1]=1;
+ }
+ else if(states[s1]<-MAX_EXP){
+ states[s1]=0;
+ }
+ else{
+ states[s1] = expTable[(int)((states[s1] + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
+ }
+ s1++;
+ }
+
+ //fgate
+ sf=s1;
+ for(s = 0; s < c_state_size; s++){
+ for(i = 0; i < c_proj_size; i++){
+ states[s1]+=chars[char_start+i]*f_b_params[p++];
+ }
+ for(i = 0; i < c_cell_size; i++){
+ states[s1]+=states[prev_cell_start+i]*f_b_params[p++];
+ }
+ for(i = 0; i < c_state_size; i++){
+ states[s1]+=states[prev_state_start+i]*f_b_params[p++];
+ }
+ states[s1]+=f_b_params[p++];
+ if(states[s1]>MAX_EXP){
+ states[s1]=1;
+ }
+ else if(states[s1]<-MAX_EXP){
+ states[s1]=0;
+ }
+ else{
+ states[s1] = expTable[(int)((states[s1] + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
+ }
+ s1++;
+ }
+
+ //c + tanh
+ sct=s1;
+ for(s = 0; s < c_state_size; s++){
+ for(i = 0; i < c_proj_size; i++){
+ states[s1]+=chars[char_start+i]*f_b_params[p++];
+ }
+ for(i = 0; i < c_state_size; i++){
+ states[s1]+=states[prev_state_start+i]*f_b_params[p++];
+ }
+ states[s1]+=f_b_params[p++];
+ if(states[s1]>MAX_EXP){
+ states[s1]=1;
+ }
+ else if(states[s1]<-MAX_EXP){
+ states[s1]=-1;
+ }
+ else{
+ states[s1] = tanhTable[(int)((states[s1] + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
+ }
+ s1++;
+ }
+
+ //cgate
+ sc=s1;
+ for(s = 0; s < c_state_size; s++){
+ states[s1]+=states[sct+s]*states[si+s]+states[sf+s]*states[prev_cell_start+s];
+ s1++;
+ }
+
+ //ogate
+ so=s1;
+ for(s = 0; s < c_state_size; s++){
+ for(i = 0; i < c_proj_size; i++){
+ states[s1]+=chars[char_start+i]*f_b_params[p++];
+ }
+ for(i = 0; i < c_cell_size; i++){
+ states[s1]+=states[sc+s]*f_b_params[p++];
+ }
+ for(i = 0; i < c_state_size; i++){
+ states[s1]+=states[prev_state_start+i]*f_b_params[p++];
+ }
+ states[s1]+=f_b_params[p++];
+ if(states[s1]>MAX_EXP){
+ states[s1]=1;
+ }
+ else if(states[s1]<-MAX_EXP){
+ states[s1]=0;
+ }
+ else{
+ states[s1] = expTable[(int)((states[s1] + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
+ }
+ s1++;
+ }
+
+ //cgate + tanh
+ sctt = s1;
+ for(s = 0; s < c_state_size; s++){
+ if(states[sc+s]>MAX_EXP){
+ states[s1]=1;
+ }
+ else if(states[sc+s]<-MAX_EXP){
+ states[s1]=-1;
+ }
+ else{
+ states[s1] = tanhTable[(int)((states[sc+s] + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
+ }
+ s1++;
+ }
+
+ //next state
+ if(states[s1]!=0){
+ printf("crap! end not zero\n");
+ }
+ for(s = 0; s < c_state_size; s++){
+ states[s1] = states[sctt+s] * states[so+s];
+ s1++;
+ }
+
+
+}
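+// Note (editor): each gate above follows the same pattern: an affine map over
+// the character projection, the previous cell/state, plus a bias, squashed
+// through the clipped expTable (sigmoid) or tanhTable lookup. A standalone
+// sketch of the sigmoid lookup (hypothetical helper, not called by this file):
+static real tableSigmoid(real x) {
+ if (x > MAX_EXP) return 1; // saturated high
+ if (x < -MAX_EXP) return 0; // saturated low
+ return expTable[(int)((x + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
+}
+// Also note the ogate peephole loop reads states[sc+s] for every i rather than
+// states[sc+i]; the backward pass mirrors the same indexing, so forward and
+// backward stay consistent with each other.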
+
+void lstmBackwardBlock(real *chars, int char_start, real*states, int next_start, int pStart, real*chars_e, real*states_e, real*lstm_params_e){
+ int p=pStart+c_lstm_params_number-1;
+ int i,s,si,sf,sc,sct,sctt,so,s1=next_start+c_state_size*7-1;
+ int prev_cell_start = next_start - c_state_size*4;
+ int prev_state_start = next_start - c_state_size;
+
+ real e;
+ si = next_start;
+ sf = next_start + c_state_size;
+ sct = next_start + c_state_size*2;
+ sc = next_start + c_state_size*3;
+ so = next_start + c_state_size*4;
+ sctt = next_start + c_state_size*5;
+
+ //next state
+ for(s = c_state_size-1; s >= 0; s--){
+ states_e[sctt+s] += states_e[s1]*states[so+s];
+ states_e[so+s] += states_e[s1]*states[sctt+s];
+ s1--;
+ }
+
+
+ //cgate + tanh
+ for(s = c_state_size-1; s >= 0; s--){
+ states_e[sc+s] += states_e[s1]*(1-states[s1]*states[s1]);
+ s1--;
+ }
+
+ //ogate
+ for(s = c_state_size-1; s >= 0; s--){
+ e = states[s1]*(1-states[s1])*states_e[s1];
+ for(i = c_proj_size-1; i >= 0; i--){
+ chars_e[char_start+i] += e*f_b_params[p];
+ lstm_params_e[p--] += e*chars[char_start+i];
+ }
+
+ for(i = c_cell_size-1; i >= 0; i--){
+ states_e[sc+s]+=e*f_b_params[p];
+ lstm_params_e[p--] += e*states[sc+s];
+ }
+ for(i = c_state_size-1; i >= 0; i--){
+ states_e[prev_state_start+i] += e*f_b_params[p];
+ lstm_params_e[p--] += e*states[prev_state_start+i]; // weight gradient uses the activation, not the error
+ }
+ lstm_params_e[p--]+=e;
+ s1--;
+ }
+
+ //cgate
+ for(s = c_state_size-1; s >= 0; s--){
+ states_e[sct+s]+=states_e[s1]*states[si+s];
+ states_e[si+s]+=states_e[s1]*states[sct+s];
+ states_e[prev_cell_start+s]+=states_e[s1]*states[sf+s];
+ states_e[sf+s]+=states_e[s1]*states[prev_cell_start+s];
+ s1--;
+ }
+
+ //c + tanh
+ for(s = c_state_size-1; s >= 0; s--){
+ e = (1-states[s1]*states[s1])*states_e[s1];
+ for(i = c_proj_size-1; i >= 0; i--){
+ chars_e[char_start+i] += e*f_b_params[p];
+ lstm_params_e[p--] += e*chars[char_start+i];
+ }
+ for(i = c_state_size-1; i >= 0; i--){
+ states_e[prev_state_start+i]+=e*f_b_params[p];
+ lstm_params_e[p--] +=e*states[prev_state_start+i];
+ }
+ lstm_params_e[p--]+=e;
+ s1--;
+ }
+
+
+ //fgate
+ for(s = c_state_size-1; s >= 0; s--){
+ e = states[s1]*(1-states[s1])*states_e[s1];
+ for(i = c_proj_size-1; i >= 0; i--){
+ chars_e[char_start+i] += e*f_b_params[p];
+ lstm_params_e[p--] += e*chars[char_start+i];
+ }
+ for(i = c_cell_size-1; i >= 0; i--){
+ states_e[prev_cell_start+i]+=e*f_b_params[p];
+ lstm_params_e[p--] +=e*states[prev_cell_start+i];
+ }
+ for(i = c_state_size-1; i >= 0; i--){
+ states_e[prev_state_start+i]+=e*f_b_params[p];
+ lstm_params_e[p--] +=e*states[prev_state_start+i];
+ }
+ lstm_params_e[p--]+=e;
+ s1--;
+ }
+
+ //igate
+ for(s = c_state_size-1; s >= 0; s--){
+ e = states[s1]*(1-states[s1])*states_e[s1];
+ for(i = c_proj_size-1; i >= 0; i--){
+ chars_e[char_start+i] += e*f_b_params[p];
+ lstm_params_e[p--] += e*chars[char_start+i];
+ }
+ for(i = c_cell_size-1; i >= 0; i--){
+ states_e[prev_cell_start+i]+=e*f_b_params[p];
+ lstm_params_e[p--] +=e*states[prev_cell_start+i];
+ }
+ for(i = c_state_size-1; i >= 0; i--){
+ states_e[prev_state_start+i]+=e*f_b_params[p];
+ lstm_params_e[p--] +=e*states[prev_state_start+i];
+ }
+ lstm_params_e[p--]+=e;
+ s1--;
+ }
+
+ if(p+1!=pStart){
+ printf("crap! p!= %d p = %d\n",pStart,p+1);
+ }
+ if(s1+1!=next_start){
+ printf("crap! s1!= %d s1 = %d\n",next_start,s1+1);
+ }
+}
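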
+
+void lstmForward(char* word, int len, real* out, real *f_states, real *b_states, real *chars){
+ //printf("%s\n",word);
+ int i,s,c,p;
+ for(s = 0; s < (len+1)*(c_state_size*7); s++){
+ f_states[s]=0;
+ b_states[s]=0;
+ }
+ for(s = 0; s < c_state_size; s++){
+ f_states[c_state_size*3+s]=f_init_cell[s];
+ f_states[c_state_size*6+s]=f_init_state[s];
+ b_states[c_state_size*3+s]=b_init_cell[s];
+ b_states[c_state_size*6+s]=b_init_state[s];
+ }
+ for(i = 0; i < len; i++){
+ c = (unsigned char)word[i]; // avoid negative indices where char is signed
+ if(c>=C_MAX_CODE){c=C_MAX_CODE-1;}
+ for(s = 0; s < c_proj_size; s++){
+ chars[i*c_proj_size+s] = c_lookup[c*c_proj_size+s];
+ }
+ }
+
+ for(i = 0; i < len; i++){
+ lstmForwardBlock(chars, i*c_proj_size, f_states, (i+1)*c_state_size*7, 0);
+ }
+ for(i = 0; i < len; i++){
+ lstmForwardBlock(chars, (len-i-1)*c_proj_size, b_states, (i+1)*c_state_size*7, c_lstm_params_number);
+ }
+
+ //printStates(f_states,c_state_size*7);
+
+ for(s = 0; s < layer1_size; s++){
+ out[s]=0;
+ }
+ p=c_lstm_params_number*2;
+ for(s = 0; s < layer1_size; s++){
+ for(i = 0; i < c_state_size; i++){
+ out[s]+=f_states[len*c_state_size*7+c_state_size*6 + i]*f_b_params[p++];
+ out[s]+=b_states[len*c_state_size*7+c_state_size*6 + i]*f_b_params[p++];
+ }
+// printf("%f ",out[s]);
+ }
+// printf("\n");
+}
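+// Note (editor): per time step t the states array holds seven contiguous
+// sections of c_state_size values starting at t*c_state_size*7:
+// [igate][fgate][c+tanh][cgate][ogate][cgate+tanh][state]
+// The word embedding is a linear map of the final forward and backward [state]
+// sections, using the f_b_params block that starts at c_lstm_params_number*2.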
+
+void lstmBackward(char* word, int len, real* out, real *f_states, real *b_states, real* chars, real* out_e, real *f_states_e, real *b_states_e, real* chars_e, real *lstm_params_e){
+ int i,s,c=-1,p;
+ for(s = 0; s < (len+1)*c_state_size*7; s++){
+ f_states_e[s]=0;
+ b_states_e[s]=0;
+ }
+ for(i = 0; i < len; i++){
+ for(s = 0; s < c_proj_size; s++){
+ chars_e[i*c_proj_size+s] = 0;
+ }
+ }
+ for(i = 0; i < c_lstm_params_number*2; i++){
+ lstm_params_e[i]=0;
+ }
+
+ p=c_lstm_params_number*2;
+ for(s = 0; s < layer1_size; s++){
+ for(i = 0; i < c_state_size; i++){
+ f_states_e[len*c_state_size*7+c_state_size*6 + i]+=out_e[s]*f_b_params[p];
+ f_b_params[p] += out_e[s] * f_states[len*c_state_size*7+c_state_size*6 + i];
+ p++;
+ b_states_e[len*c_state_size*7+c_state_size*6 + i]+=out_e[s]*f_b_params[p];
+ f_b_params[p] += out_e[s] * b_states[len*c_state_size*7+c_state_size*6 + i];
+ p++;
+ }
+ }
+
+ for(i = len-1; i >=0; i--){
+ lstmBackwardBlock(chars, i*c_proj_size, b_states, (i+1)*c_state_size*7, c_lstm_params_number, chars_e,b_states_e,lstm_params_e);
+ }
+
+ for(i = len-1; i >=0; i--){
+ lstmBackwardBlock(chars, (len-i-1)*c_proj_size, f_states, (i+1)*c_state_size*7, 0, chars_e,f_states_e,lstm_params_e);
+ }
+
+ for(i = 0; i < len; i++){
+ c = (unsigned char)word[i]; // avoid negative indices where char is signed
+ if(c>=C_MAX_CODE){c=C_MAX_CODE-1;}
+ for(s = 0; s < c_proj_size; s++){
+ c_lookup[c*c_proj_size+s] += chars_e[i*c_proj_size+s];
+ }
+ }
+
+ for(s = 0; s < c_state_size; s++){
+ f_init_cell[s]+=f_states_e[c_state_size*3];
+ f_init_state[s]+=f_states_e[c_state_size*6];
+ b_init_cell[s]+=b_states_e[c_state_size*3];
+ b_init_state[s]+=b_states_e[c_state_size*6];
+ }
+
+ for(s = 0; s < c_lstm_params_number*2; s++){
+ f_b_params[s]+=lstm_params_e[s];
+ }
+
+ //printf("out\n");
+ //printStates(f_states,(len)*c_state_size*7);
+ //printf("err\n");
+ //printStates(f_states_e,(len)*c_state_size*7);
+
+}
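+// Note (editor): lstmBackward applies the output-projection updates to
+// f_b_params immediately (in its first loop), while gradients for the LSTM
+// blocks accumulate in lstm_params_e and are added in one pass at the end.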
+
+void lstmFitting(char* word, int len, real* out, real *f_states, real *b_states, real* chars, real* out_expected, real* out_e, real *f_states_e, real *b_states_e, real* chars_e, real *lstm_params_e){
+ int i;
+ real g = 0;
+ lstmForward(word, len, out, f_states, b_states, chars);
+ for(i = 0; i < layer1_size; i++){
+ if(out_expected[i]>out[i]){
+ g += out_expected[i]-out[i];
+ }
+ else{
+ g += -out_expected[i]+out[i];
+ }
+ out_e[i] = (out_expected[i]-out[i])*alpha;
+ }
+ printf("error before fitting = %f\n", g);
+ lstmBackward(word, len, out, f_states, b_states, chars, out_e, f_states_e, b_states_e, chars_e, lstm_params_e);
+ lstmForward(word, len, out, f_states, b_states, chars);
+ g=0;
+ for(i = 0; i < layer1_size; i++){
+ if(out_expected[i]>out[i]){
+ g += out_expected[i]-out[i];
+ }
+ else{
+ g += -out_expected[i]+out[i];
+ }
+ out_e[i] = (out_expected[i]-out[i])*alpha;
+ }
+ printf("error after fitting = %f\n", g);
+
+}
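+// Note (editor): lstmFitting takes one gradient step that pulls the LSTM
+// output toward out_expected, printing the L1 error before and after; in the
+// rep == 2 scheme below it re-fits the compositional model to a word vector
+// that was updated directly in syn0.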
+
+real hardTanh(real x){
+ if(x>=1){
+ return 1;
+ }
+ else if(x<=-1){
+ return -1;
+ }
+ else{
+ return x;
+ }
+}
+
+real dHardTanh(real x, real g){
+ if(x > 1 && g > 0){
+ return 0;
+ }
+ if(x < -1 && g < 0){
+ return 0;
+ }
+ return 1;
+}
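+// Note (editor): dHardTanh is a gate rather than the exact derivative: it
+// returns 0 when (x > 1 && g > 0) or (x < -1 && g < 0) and 1 everywhere else,
+// e.g. dHardTanh(1.2, 0.5) == 0 but dHardTanh(1.2, -0.5) == 1.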
+
+void InitUnigramTable() {
+ int a, i;
+ long long train_words_pow = 0;
+ real d1, power = 0.75;
+ table = (int *)malloc(table_size * sizeof(int));
+ for (a = 0; a < vocab_size; a++) train_words_pow += pow(vocab[a].cn, power);
+ i = 0;
+ d1 = pow(vocab[i].cn, power) / (real)train_words_pow;
+ for (a = 0; a < table_size; a++) {
+ table[a] = i;
+ if (a / (real)table_size > d1) {
+ i++;
+ d1 += pow(vocab[i].cn, power) / (real)train_words_pow;
+ }
+ if (i >= vocab_size) i = vocab_size - 1;
+ }
+}
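+// Note (editor): a draw from this table samples word w with probability
+// proportional to count(w)^0.75; the training loop does it inline, roughly:
+// next_random = next_random * (unsigned long long)25214903917 + 11;
+// target = table[(next_random >> 16) % table_size];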
+
+// Reads a single word from a file, assuming space + tab + EOL to be word boundaries
+void ReadWord(char *word, FILE *fin) {
+ int a = 0, ch;
+ while (!feof(fin)) {
+ ch = fgetc(fin);
+ if (ch == 13) continue;
+ if ((ch == ' ') || (ch == '\t') || (ch == '\n')) {
+ if (a > 0) {
+ if (ch == '\n') ungetc(ch, fin);
+ break;
+ }
+ if (ch == '\n') {
+ strcpy(word, (char *)"</s>");
+ return;
+ } else continue;
+ }
+ word[a] = ch;
+ a++;
+ if (a >= MAX_STRING - 1) a--; // Truncate too long words
+ }
+ word[a] = 0;
+}
+
+// Returns hash value of a word
+int GetWordHash(char *word) {
+ unsigned long long a, hash = 0;
+ for (a = 0; a < strlen(word); a++) hash = hash * 257 + word[a];
+ hash = hash % vocab_hash_size;
+ return hash;
+}
+
+// Returns position of a word in the vocabulary; if the word is not found, returns -1
+int SearchVocab(char *word) {
+ unsigned int hash = GetWordHash(word);
+ while (1) {
+ if (vocab_hash[hash] == -1) return -1;
+ if (!strcmp(word, vocab[vocab_hash[hash]].word)) return vocab_hash[hash];
+ hash = (hash + 1) % vocab_hash_size;
+ }
+ return -1;
+}
+
+// Reads a word and returns its index in the vocabulary
+int ReadWordIndex(FILE *fin) {
+ char word[MAX_STRING];
+ ReadWord(word, fin);
+ if (feof(fin)) return -1;
+ return SearchVocab(word);
+}
+
+// Reads a word and returns its index in the vocabulary
+int ReadAndStoreWordIndex(FILE *fin, char* word) {
+ ReadWord(word, fin);
+ if (feof(fin)) return -1;
+ return SearchVocab(word);
+}
+
+// Adds a word to the vocabulary
+int AddWordToVocab(char *word) {
+ unsigned int hash, length = strlen(word) + 1;
+ if (length > MAX_STRING) length = MAX_STRING;
+ vocab[vocab_size].word = (char *)calloc(length, sizeof(char));
+ strcpy(vocab[vocab_size].word, word);
+ vocab[vocab_size].cn = 0;
+ vocab_size++;
+ // Reallocate memory if needed
+ if (vocab_size + 2 >= vocab_max_size) {
+ vocab_max_size += 1000;
+ vocab = (struct vocab_word *)realloc(vocab, vocab_max_size * sizeof(struct vocab_word));
+ }
+ hash = GetWordHash(word);
+ while (vocab_hash[hash] != -1) hash = (hash + 1) % vocab_hash_size;
+ vocab_hash[hash] = vocab_size - 1;
+ return vocab_size - 1;
+}
+
+// Used later for sorting by word counts
+int VocabCompare(const void *a, const void *b) {
+ long long diff = ((struct vocab_word *)b)->cn - ((struct vocab_word *)a)->cn;
+ return (diff > 0) - (diff < 0); // avoid truncating a long long difference to int
+}
+
+// Sorts the vocabulary by frequency using word counts
+void SortVocab() {
+ int a, size;
+ unsigned int hash;
+ // Sort the vocabulary and keep </s> at the first position
+ qsort(&vocab[1], vocab_size - 1, sizeof(struct vocab_word), VocabCompare);
+ for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1;
+ size = vocab_size;
+ train_words = 0;
+ for (a = 0; a < size; a++) {
+ // Words occurring less than min_count times will be discarded from the vocab
+ if ((vocab[a].cn < min_count) && (a != 0)) {
+ vocab_size--;
+ free(vocab[a].word);
+ } else {
+ // Hash will be re-computed, as it is no longer valid after the sorting
+ hash=GetWordHash(vocab[a].word);
+ while (vocab_hash[hash] != -1) hash = (hash + 1) % vocab_hash_size;
+ vocab_hash[hash] = a;
+ train_words += vocab[a].cn;
+ }
+ }
+ vocab = (struct vocab_word *)realloc(vocab, (vocab_size + 1) * sizeof(struct vocab_word));
+ // Allocate memory for the binary tree construction
+ for (a = 0; a < vocab_size; a++) {
+ vocab[a].code = (char *)calloc(MAX_CODE_LENGTH, sizeof(char));
+ vocab[a].point = (int *)calloc(MAX_CODE_LENGTH, sizeof(int));
+ }
+}
+
+// Reduces the vocabulary by removing infrequent tokens
+void ReduceVocab() {
+ int a, b = 0;
+ unsigned int hash;
+ for (a = 0; a < vocab_size; a++) if (vocab[a].cn > min_reduce) {
+ vocab[b].cn = vocab[a].cn;
+ vocab[b].word = vocab[a].word;
+ b++;
+ } else free(vocab[a].word);
+ vocab_size = b;
+ for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1;
+ for (a = 0; a < vocab_size; a++) {
+ // Hash will be re-computed, as it is no longer valid
+ hash = GetWordHash(vocab[a].word);
+ while (vocab_hash[hash] != -1) hash = (hash + 1) % vocab_hash_size;
+ vocab_hash[hash] = a;
+ }
+ fflush(stdout);
+ min_reduce++;
+}
+
+// Create binary Huffman tree using the word counts
+// Frequent words will have short unique binary codes
+void CreateBinaryTree() {
+ long long a, b, i, min1i, min2i, pos1, pos2, point[MAX_CODE_LENGTH];
+ char code[MAX_CODE_LENGTH];
+ long long *count = (long long *)calloc(vocab_size * 2 + 1, sizeof(long long));
+ long long *binary = (long long *)calloc(vocab_size * 2 + 1, sizeof(long long));
+ long long *parent_node = (long long *)calloc(vocab_size * 2 + 1, sizeof(long long));
+ for (a = 0; a < vocab_size; a++) count[a] = vocab[a].cn;
+ for (a = vocab_size; a < vocab_size * 2; a++) count[a] = 1e15;
+ pos1 = vocab_size - 1;
+ pos2 = vocab_size;
+ // Following algorithm constructs the Huffman tree by adding one node at a time
+ for (a = 0; a < vocab_size - 1; a++) {
+ // First, find two smallest nodes 'min1, min2'
+ if (pos1 >= 0) {
+ if (count[pos1] < count[pos2]) {
+ min1i = pos1;
+ pos1--;
+ } else {
+ min1i = pos2;
+ pos2++;
+ }
+ } else {
+ min1i = pos2;
+ pos2++;
+ }
+ if (pos1 >= 0) {
+ if (count[pos1] < count[pos2]) {
+ min2i = pos1;
+ pos1--;
+ } else {
+ min2i = pos2;
+ pos2++;
+ }
+ } else {
+ min2i = pos2;
+ pos2++;
+ }
+ count[vocab_size + a] = count[min1i] + count[min2i];
+ parent_node[min1i] = vocab_size + a;
+ parent_node[min2i] = vocab_size + a;
+ binary[min2i] = 1;
+ }
+ // Now assign binary code to each vocabulary word
+ for (a = 0; a < vocab_size; a++) {
+ b = a;
+ i = 0;
+ while (1) {
+ code[i] = binary[b];
+ point[i] = b;
+ i++;
+ b = parent_node[b];
+ if (b == vocab_size * 2 - 2) break;
+ }
+ vocab[a].codelen = i;
+ vocab[a].point[0] = vocab_size - 2;
+ for (b = 0; b < i; b++) {
+ vocab[a].code[i - b - 1] = code[b];
+ vocab[a].point[i - b] = point[b] - vocab_size;
+ }
+ }
+ free(count);
+ free(binary);
+ free(parent_node);
+}
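+// Note (editor): after this call, vocab[w].code[i] is the i-th bit on the
+// root-to-leaf path for w and vocab[w].point[i] the matching inner node; the
+// hierarchical-softmax loops below then train, per bit, a logistic regression
+// with label (1 - code[i]) against that inner node's output row.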
+
+void LearnVocabFromTrainFile() {
+ char word[MAX_STRING];
+ FILE *fin;
+ long long a, i;
+ for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1;
+ fin = fopen(train_file, "rb");
+ if (fin == NULL) {
+ printf("ERROR: training data file not found!\n");
+ exit(1);
+ }
+ vocab_size = 0;
+ AddWordToVocab((char *)"</s>");
+ while (1) {
+ ReadWord(word, fin);
+ if (feof(fin)) break;
+ train_words++;
+ if ((debug_mode > 1) && (train_words % 100000 == 0)) {
+ printf("%lldK%c", train_words / 1000, 13);
+ fflush(stdout);
+ }
+ i = SearchVocab(word);
+ if (i == -1) {
+ a = AddWordToVocab(word);
+ vocab[a].cn = 1;
+ } else vocab[i].cn++;
+ if (vocab_size > vocab_hash_size * 0.7) ReduceVocab();
+ }
+ SortVocab();
+ if (debug_mode > 0) {
+ printf("Vocab size: %lld\n", vocab_size);
+ printf("Words in train file: %lld\n", train_words);
+ }
+ file_size = ftell(fin);
+ fclose(fin);
+}
+
+void SaveVocab() {
+ long long i;
+ FILE *fo = fopen(save_vocab_file, "wb");
+ for (i = 0; i < vocab_size; i++) fprintf(fo, "%s %lld\n", vocab[i].word, vocab[i].cn);
+ fclose(fo);
+}
+
+void ReadVocab() {
+ long long a, i = 0;
+ char c;
+ char word[MAX_STRING];
+ FILE *fin = fopen(read_vocab_file, "rb");
+ if (fin == NULL) {
+ printf("Vocabulary file not found\n");
+ exit(1);
+ }
+ for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1;
+ vocab_size = 0;
+ while (1) {
+ ReadWord(word, fin);
+ if (feof(fin)) break;
+ a = AddWordToVocab(word);
+ fscanf(fin, "%lld%c", &vocab[a].cn, &c);
+ i++;
+ }
+ SortVocab();
+ if (debug_mode > 0) {
+ printf("Vocab size: %lld\n", vocab_size);
+ printf("Words in train file: %lld\n", train_words);
+ }
+ fin = fopen(train_file, "rb");
+ if (fin == NULL) {
+ printf("ERROR: training data file not found!\n");
+ exit(1);
+ }
+ fseek(fin, 0, SEEK_END);
+ file_size = ftell(fin);
+ fclose(fin);
+}
+
+void InitClassUnigramTable() {
+ long long a,c;
+ printf("loading class unigrams \n");
+ FILE *fin = fopen(negative_classes_file, "rb");
+ if (fin == NULL) {
+ printf("ERROR: class file not found!\n");
+ exit(1);
+ }
+ word_to_group = (int *)malloc(vocab_size * sizeof(int));
+ for(a = 0; a < vocab_size; a++) word_to_group[a] = -1;
+ char class[MAX_STRING];
+ char prev_class[MAX_STRING];
+ prev_class[0] = 0;
+ char word[MAX_STRING];
+ class_number = -1;
+ while (1) {
+ if (feof(fin)) break;
+ ReadWord(class, fin);
+ ReadWord(word, fin);
+ int word_index = SearchVocab(word);
+ if (word_index != -1){
+ if(strcmp(class, prev_class) != 0){
+ class_number++;
+ strcpy(prev_class, class);
+ }
+ word_to_group[word_index] = class_number;
+ }
+ ReadWord(word, fin);
+ }
+ class_number++;
+ fclose(fin);
+
+ group_to_table = (int *)malloc(table_size * class_number * sizeof(int));
+ long long train_words_pow = 0;
+ real d1, power = 0.75;
+
+ for(c = 0; c < class_number; c++){
+ long long offset = c * table_size;
+ train_words_pow = 0;
+ for (a = 0; a < vocab_size; a++) if(word_to_group[a] == c) train_words_pow += pow(vocab[a].cn, power);
+ int i = 0;
+ while(i < vocab_size && word_to_group[i]!=c) i++; // test the bound before reading word_to_group[i]
+ d1 = pow(vocab[i].cn, power) / (real)train_words_pow;
+ for (a = 0; a < table_size; a++) {
+ //printf("index %lld , word %d\n", a, i);
+ group_to_table[offset + a] = i;
+ if (a / (real)table_size > d1) {
+ i++;
+ while(i < vocab_size && word_to_group[i]!=c) i++;
+ d1 += pow(vocab[i].cn, power) / (real)train_words_pow;
+ }
+ if (i >= vocab_size) { i = vocab_size - 1; while(i >= 0 && word_to_group[i]!=c) i--; }
+ }
+ }
+}
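+// Note (editor): group_to_table is laid out as class_number rows of table_size
+// entries, so a negative sample restricted to word w's class is read as
+// group_to_table[word_to_group[w] * table_size + r] for a random r, exactly as
+// the training loop below does.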
+
+void InitNet() {
+ long long a, b;
+ unsigned long long next_random = 1;
+ window_layer_size = layer1_size*window*2;
+ a = posix_memalign((void **)&syn0, 128, (long long)vocab_size * layer1_size * sizeof(real));
+ if (syn0 == NULL) {printf("Memory allocation failed\n"); exit(1);}
+
+ if (hs) {
+ a = posix_memalign((void **)&syn1, 128, (long long)vocab_size * layer1_size * sizeof(real));
+ if (syn1 == NULL) {printf("Memory allocation failed\n"); exit(1);}
+ a = posix_memalign((void **)&syn1_window, 128, (long long)vocab_size * window_layer_size * sizeof(real));
+ if (syn1_window == NULL) {printf("Memory allocation failed\n"); exit(1);}
+ a = posix_memalign((void **)&syn_hidden_word, 128, (long long)vocab_size * window_hidden_size * sizeof(real));
+ if (syn_hidden_word == NULL) {printf("Memory allocation failed\n"); exit(1);}
+
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < layer1_size; b++)
+ syn1[a * layer1_size + b] = 0;
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < window_layer_size; b++)
+ syn1_window[a * window_layer_size + b] = 0;
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < window_hidden_size; b++)
+ syn_hidden_word[a * window_hidden_size + b] = 0;
+ }
+ if (negative>0) {
+ a = posix_memalign((void **)&syn1neg, 128, (long long)vocab_size * layer1_size * sizeof(real));
+ if (syn1neg == NULL) {printf("Memory allocation failed\n"); exit(1);}
+ a = posix_memalign((void **)&syn1neg_window, 128, (long long)vocab_size * window_layer_size * sizeof(real));
+ if (syn1neg_window == NULL) {printf("Memory allocation failed\n"); exit(1);}
+ a = posix_memalign((void **)&syn_hidden_word_neg, 128, (long long)vocab_size * window_hidden_size * sizeof(real));
+ if (syn_hidden_word_neg == NULL) {printf("Memory allocation failed\n"); exit(1);}
+
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < layer1_size; b++)
+ syn1neg[a * layer1_size + b] = 0;
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < window_layer_size; b++)
+ syn1neg_window[a * window_layer_size + b] = 0;
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < window_hidden_size; b++)
+ syn_hidden_word_neg[a * window_hidden_size + b] = 0;
+ }
+ for (a = 0; a < vocab_size; a++) for (b = 0; b < layer1_size; b++) {
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ syn0[a * layer1_size + b] = (((next_random & 0xFFFF) / (real)65536) - 0.5) / layer1_size;
+ }
+
+ a = posix_memalign((void **)&syn_window_hidden, 128, window_hidden_size * window_layer_size * sizeof(real));
+ if (syn_window_hidden == NULL) {printf("Memory allocation failed\n"); exit(1);}
+ for (a = 0; a < window_hidden_size * window_layer_size; a++){
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ syn_window_hidden[a] = (((next_random & 0xFFFF) / (real)65536) - 0.5) / (window_hidden_size*window_layer_size);
+ }
+
+ if(rep == 1 || rep == 2){
+ a = posix_memalign((void **)&c_lookup, 128, (long long)C_MAX_CODE * c_proj_size * sizeof(real));
+ if (c_lookup == NULL) {printf("Memory allocation failed\n"); exit(1);}
+ for (a = 0; a < C_MAX_CODE * c_proj_size; a++){
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ c_lookup[a] = (((next_random & 0xFFFF) / (real)65536) - 0.5) / (c_proj_size);
+ }
+
+ a = posix_memalign((void **)&f_init_state, 128, c_state_size * sizeof(real));
+ if (f_init_state == NULL) {printf("Memory allocation failed\n"); exit(1);}
+ a = posix_memalign((void **)&f_init_cell, 128, c_state_size * sizeof(real));
+ if (f_init_cell == NULL) {printf("Memory allocation failed\n"); exit(1);}
+ a = posix_memalign((void **)&b_init_state, 128, c_state_size * sizeof(real));
+ if (b_init_state == NULL) {printf("Memory allocation failed\n"); exit(1);}
+ a = posix_memalign((void **)&b_init_cell, 128, c_state_size * sizeof(real));
+ if (b_init_cell == NULL) {printf("Memory allocation failed\n"); exit(1);}
+
+ for (a = 0; a < c_state_size; a++){
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ f_init_state[a] = (((next_random & 0xFFFF) / (real)65536) - 0.5) / (c_state_size);
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ f_init_cell[a] = (((next_random & 0xFFFF) / (real)65536) - 0.5) / (c_state_size);
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ b_init_state[a] = (((next_random & 0xFFFF) / (real)65536) - 0.5) / (c_state_size);
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ b_init_cell[a] = (((next_random & 0xFFFF) / (real)65536) - 0.5) / (c_state_size);
+ }
+
+ c_lstm_params_number = /*input*/ (c_state_size+c_cell_size+c_proj_size+1)*c_state_size +
+ /*forget*/ (c_state_size+c_cell_size+c_proj_size+1)*c_state_size +
+ /*cell*/ (c_state_size+c_proj_size+1)*c_state_size +
+ /*output*/ (c_state_size+c_cell_size+c_proj_size+1)*c_state_size;
+
+ c_params_number = ( c_lstm_params_number * 2 + (c_state_size*2)*layer1_size) ;
+ a = posix_memalign((void **)&f_b_params, 128, c_params_number* sizeof(real));
+ if (f_b_params == NULL) {printf("Memory allocation failed\n"); exit(1);}
+
+ for (a = 0; a < c_params_number; a++){
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ f_b_params[a] = (((next_random & 0xFFFF) / (real)65536) - 0.5) ;
+ }
+ }
+
+ if(rep == 2){
+ a = posix_memalign((void **)&syn0_initial, 128, (long long)vocab_size * layer1_size * sizeof(real));
+ if (syn0_initial == NULL) {printf("Memory allocation failed\n"); exit(1);}
+ a = posix_memalign((void **)&syn0_in_memory, 128, (long long)vocab_size * sizeof(real));
+ if (syn0_in_memory == NULL) {printf("Memory allocation failed\n"); exit(1);}
+ for(a = 0; a < vocab_size; a++){
+ syn0_in_memory[a] = -1;
+ }
+ }
+ CreateBinaryTree();
+}
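+// Note (editor): every random initialization above draws from the same
+// java.util.Random-style LCG and maps the low 16 bits to a scaled uniform
+// value. A minimal sketch of one draw (hypothetical helper, not called here):
+static real lcgUniform(unsigned long long *state, real scale) {
+ *state = *state * (unsigned long long)25214903917 + 11; // LCG step
+ return (((*state & 0xFFFF) / (real)65536) - 0.5) / scale; // uniform in [-0.5, 0.5) / scale
+}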
+
+void *TrainModelThread(void *id) {
+ long long a, b, d, cw, word, last_word, sentence_length = 0, sentence_position = 0;
+ long long word_count = 0, last_word_count = 0, sen[MAX_SENTENCE_LENGTH + 1];
+ long long l1, l2, c, target, label, local_iter = iter;
+ char c_sen[(MAX_SENTENCE_LENGTH + 1) * MAX_STRING];
+ unsigned long long next_random = (long long)id;
+ real f, g, acc_g=0;
+ clock_t now;
+ int input_len_1 = layer1_size;
+ if(type == 2 || type == 4){
+ input_len_1=window_layer_size;
+ }
+ real *neu1 = (real *)calloc(input_len_1, sizeof(real));
+ real *neu1e = (real *)calloc(input_len_1, sizeof(real));
+
+ int input_len_2 = 0;
+ if(type == 4){
+ input_len_2 = window_hidden_size;
+ }
+ real *neu2 = (real *)calloc(input_len_2, sizeof(real));
+ real *neu2e = (real *)calloc(input_len_2, sizeof(real));
+
+ FILE *fi = fopen(train_file, "rb");
+ fseek(fi, file_size / (long long)num_threads * (long long)id, SEEK_SET);
+
+ real *f_states = (real *)calloc((c_state_size * 7) * (MAX_STRING + 1), sizeof(real));
+ real *f_states_e = (real *)calloc((c_state_size * 7) * (MAX_STRING + 1), sizeof(real));
+ real *b_states = (real *)calloc((c_state_size * 7) * (MAX_STRING + 1), sizeof(real));
+ real *b_states_e = (real *)calloc((c_state_size * 7) * (MAX_STRING + 1), sizeof(real));
+ real *chars = (real *)calloc(c_proj_size * MAX_STRING, sizeof(real));
+ real *chars_e = (real *)calloc(c_proj_size * MAX_STRING, sizeof(real));
+ real *lstm_params_e = (real *)calloc(c_lstm_params_number*2, sizeof(real));
+
+ //short term memory vars
+ real global_divergence = -1;
+ int in_mem = 0;
+ int skip=0, non_skip=0;
+
+ while (1) {
+ if (word_count - last_word_count > 10000) {
+ word_count_actual += word_count - last_word_count;
+ last_word_count = word_count;
+ if ((debug_mode > 1)) {
+ now=clock();
+ printf("%cAlpha: %f Progress: %.2f%% Words/thread/sec: %.2fk : error %.4f", 13, alpha,
+ word_count_actual / (real)(iter * train_words + 1) * 100,
+ word_count_actual / ((real)(now - start + 1) / (real)CLOCKS_PER_SEC * 1000), acc_g);
+ if(rep == 2){
+ printf(" skiprate %f",skip/(real)(skip+non_skip));
+ }
+ acc_g=0;
+ skip=0;
+ non_skip=0;
+ fflush(stdout);
+ }
+ alpha = starting_alpha * (1 - word_count_actual / (real)(iter * train_words + 1));
+ if (alpha < starting_alpha * 0.0001) alpha = starting_alpha * 0.0001;
+ }
+ if (sentence_length == 0) {
+ while (1) {
+ word = ReadAndStoreWordIndex(fi, &c_sen[sentence_length*MAX_STRING]);
+ if (feof(fi)) break;
+ if (word == -1) continue;
+ word_count++;
+ if (word == 0) break;
+ // The subsampling randomly discards frequent words while keeping the ranking same
+ if (sample > 0) {
+ real ran = (sqrt(vocab[word].cn / (sample * train_words)) + 1) * (sample * train_words) / vocab[word].cn;
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ if (ran < (next_random & 0xFFFF) / (real)65536) continue;
+ }
+ sen[sentence_length] = word;
+ sentence_length++;
+ if (sentence_length >= MAX_SENTENCE_LENGTH) break;
+ }
+ sentence_position = 0;
+ }
+ if (feof(fi) || (word_count > train_words / num_threads)) {
+ word_count_actual += word_count - last_word_count;
+ local_iter--;
+ if (local_iter == 0) break;
+ word_count = 0;
+ last_word_count = 0;
+ sentence_length = 0;
+ fseek(fi, file_size / (long long)num_threads * (long long)id, SEEK_SET);
+ continue;
+ }
+ word = sen[sentence_position];
+ if (word == -1) continue;
+ for (c = 0; c < input_len_1; c++) neu1[c] = 0;
+ for (c = 0; c < input_len_1; c++) neu1e[c] = 0;
+ for (c = 0; c < input_len_2; c++) neu2[c] = 0;
+ for (c = 0; c < input_len_2; c++) neu2e[c] = 0;
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ b = next_random % window;
+ if (type == 0) { //train the cbow architecture
+ // in -> hidden
+ cw = 0;
+ for (a = b; a < window * 2 + 1 - b; a++) if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0) continue;
+ if (c >= sentence_length) continue;
+ last_word = sen[c];
+ if (last_word == -1) continue;
+ for (c = 0; c < layer1_size; c++) neu1[c] += syn0[c + last_word * layer1_size];
+ cw++;
+ }
+ if (cw) {
+ for (c = 0; c < layer1_size; c++) neu1[c] /= cw;
+ if (hs) for (d = 0; d < vocab[word].codelen; d++) {
+ f = 0;
+ l2 = vocab[word].point[d] * layer1_size;
+ // Propagate hidden -> output
+ for (c = 0; c < layer1_size; c++) f += neu1[c] * syn1[c + l2];
+ if (f <= -MAX_EXP) continue;
+ else if (f >= MAX_EXP) continue;
+ else f = expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
+ // 'g' is the gradient multiplied by the learning rate
+ g = (1 - vocab[word].code[d] - f) * alpha;
+ // Propagate errors output -> hidden
+ for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1[c + l2];
+ // Learn weights hidden -> output
+ for (c = 0; c < layer1_size; c++) syn1[c + l2] += g * neu1[c];
+ }
+ // NEGATIVE SAMPLING
+ if (negative > 0) for (d = 0; d < negative + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ if(word_to_group != NULL && word_to_group[word] != -1){
+ target = word;
+ while(target == word) {
+ target = group_to_table[word_to_group[word]*table_size + (next_random >> 16) % table_size];
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ }
+ else{
+ target = table[(next_random >> 16) % table_size];
+ }
+ if (target == 0) target = next_random % (vocab_size - 1) + 1;
+ if (target == word) continue;
+ label = 0;
+ }
+ l2 = target * layer1_size;
+ f = 0;
+ for (c = 0; c < layer1_size; c++) f += neu1[c] * syn1neg[c + l2];
+ if (f > MAX_EXP) g = (label - 1) * alpha;
+ else if (f < -MAX_EXP) g = (label - 0) * alpha;
+ else g = (label - expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]) * alpha;
+ for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1neg[c + l2];
+ for (c = 0; c < layer1_size; c++) syn1neg[c + l2] += g * neu1[c];
+ }
+ // hidden -> in
+ for (a = b; a < window * 2 + 1 - b; a++) if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0) continue;
+ if (c >= sentence_length) continue;
+ last_word = sen[c];
+ if (last_word == -1) continue;
+ for (c = 0; c < layer1_size; c++) syn0[c + last_word * layer1_size] += neu1e[c];
+ }
+ }
+ } else if(type==1) { //train skip-gram
+ for (a = b; a < window * 2 + 1 - b; a++) if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0) continue;
+ if (c >= sentence_length) continue;
+ last_word = sen[c];
+ if (last_word == -1) continue;
+ l1 = last_word * layer1_size;
+ for (c = 0; c < layer1_size; c++) neu1e[c] = 0;
+ // HIERARCHICAL SOFTMAX
+ if (hs) for (d = 0; d < vocab[word].codelen; d++) {
+ f = 0;
+ l2 = vocab[word].point[d] * layer1_size;
+ // Propagate hidden -> output
+ for (c = 0; c < layer1_size; c++) f += syn0[c + l1] * syn1[c + l2];
+ if (f <= -MAX_EXP) continue;
+ else if (f >= MAX_EXP) continue;
+ else f = expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
+ // 'g' is the gradient multiplied by the learning rate
+ g = (1 - vocab[word].code[d] - f) * alpha;
+ // Propagate errors output -> hidden
+ for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1[c + l2];
+ // Learn weights hidden -> output
+ for (c = 0; c < layer1_size; c++) syn1[c + l2] += g * syn0[c + l1];
+ }
+ // NEGATIVE SAMPLING
+ if (negative > 0) for (d = 0; d < negative + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ if(word_to_group != NULL && word_to_group[word] != -1){
+ target = word;
+ while(target == word) {
+ target = group_to_table[word_to_group[word]*table_size + (next_random >> 16) % table_size];
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ }
+ else{
+ target = table[(next_random >> 16) % table_size];
+ }
+ if (target == 0) target = next_random % (vocab_size - 1) + 1;
+ if (target == word) continue;
+ label = 0;
+ }
+ l2 = target * layer1_size;
+ f = 0;
+ for (c = 0; c < layer1_size; c++) f += syn0[c + l1] * syn1neg[c + l2];
+ if (f > MAX_EXP) g = (label - 1) * alpha;
+ else if (f < -MAX_EXP) g = (label - 0) * alpha;
+ else g = (label - expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]) * alpha;
+ for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1neg[c + l2];
+ for (c = 0; c < layer1_size; c++) syn1neg[c + l2] += g * syn0[c + l1];
+ }
+ // Learn weights input -> hidden
+ for (c = 0; c < layer1_size; c++) syn0[c + l1] += neu1e[c];
+ }
+ }
+ else if(type == 2){ //train the cwindow architecture
+ // in -> hidden
+ cw = 0;
+ for (a = 0; a < window * 2 + 1; a++) if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0) continue;
+ if (c >= sentence_length) continue;
+ last_word = sen[c];
+ if (last_word == -1) continue;
+ window_offset = a*layer1_size;
+ if (a > window) window_offset-=layer1_size;
+ for (c = 0; c < layer1_size; c++) neu1[c+window_offset] += syn0[c + last_word * layer1_size];
+ cw++;
+ }
+ if (cw) {
+ if (hs) for (d = 0; d < vocab[word].codelen; d++) {
+ f = 0;
+ l2 = vocab[word].point[d] * window_layer_size;
+ // Propagate hidden -> output
+ for (c = 0; c < window_layer_size; c++) f += neu1[c] * syn1_window[c + l2];
+ if (f <= -MAX_EXP) continue;
+ else if (f >= MAX_EXP) continue;
+ else f = expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
+ // 'g' is the gradient multiplied by the learning rate
+ g = (1 - vocab[word].code[d] - f) * alpha;
+ // Propagate errors output -> hidden
+ for (c = 0; c < window_layer_size; c++) neu1e[c] += g * syn1_window[c + l2];
+ // Learn weights hidden -> output
+ for (c = 0; c < window_layer_size; c++) syn1_window[c + l2] += g * neu1[c];
+ }
+ // NEGATIVE SAMPLING
+ if (negative > 0) for (d = 0; d < negative + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ if(word_to_group != NULL && word_to_group[word] != -1){
+ target = word;
+ while(target == word) {
+ target = group_to_table[word_to_group[word]*table_size + (next_random >> 16) % table_size];
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ }
+ else{
+ target = table[(next_random >> 16) % table_size];
+ }
+ if (target == 0) target = next_random % (vocab_size - 1) + 1;
+ if (target == word) continue;
+ label = 0;
+ }
+ l2 = target * window_layer_size;
+ f = 0;
+ for (c = 0; c < window_layer_size; c++) f += neu1[c] * syn1neg_window[c + l2];
+ if (f > MAX_EXP) g = (label - 1) * alpha;
+ else if (f < -MAX_EXP) g = (label - 0) * alpha;
+ else g = (label - expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]) * alpha;
+ acc_g+=g;
+ for (c = 0; c < window_layer_size; c++) neu1e[c] += g * syn1neg_window[c + l2];
+ for (c = 0; c < window_layer_size; c++) syn1neg_window[c + l2] += g * neu1[c];
+ }
+ // hidden -> in
+ for (a = 0; a < window * 2 + 1; a++) if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0) continue;
+ if (c >= sentence_length) continue;
+ last_word = sen[c];
+ if (last_word == -1) continue;
+ window_offset = a * layer1_size;
+ if(a > window) window_offset -= layer1_size;
+ for (c = 0; c < layer1_size; c++) syn0[c + last_word * layer1_size] += neu1e[c + window_offset];
+ }
+ }
+ }
+ else if (type == 3){ //train structured skip-gram
+ char* c_word = &c_sen[sentence_position*MAX_STRING];
+ if(rep == 1){
+ lstmForward(c_word, strlen(c_word),neu1, f_states, b_states, chars);
+ }
+ else if(rep == 2){
+ l1 = word * layer1_size;
+ if(syn0_in_memory[word]==-1){
+ syn0_in_memory[word]=0;
+ lstmForward(c_word, strlen(c_word),&syn0_initial[l1], f_states, b_states, chars);
+ for (c = 0; c < layer1_size; c++) {syn0[c + l1] = syn0_initial[c + l1];neu1[c] += syn0[c + l1];}
+ in_mem = 1;
+ }
+ else{
+ for (c = 0; c < layer1_size; c++) neu1[c] += syn0[c + l1];
+ in_mem = 0;
+ }
+ }
+ else{
+ l1 = word * layer1_size;
+ for (c = 0; c < layer1_size; c++) neu1[c] += syn0[c + l1];
+ }
+
+ for (a = 0; a < window * 2 + 1; a++) if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0) continue;
+ if (c >= sentence_length) continue;
+ last_word = sen[c];
+ if (last_word == -1) continue;
+
+
+ window_offset = a * layer1_size;
+ if(a > window) window_offset -= layer1_size;
+ for (c = 0; c < layer1_size; c++) neu1e[c] = 0;
+ // HIERARCHICAL SOFTMAX
+ if (hs) for (d = 0; d < vocab[last_word].codelen; d++) {
+ f = 0;
+ l2 = vocab[last_word].point[d] * window_layer_size;
+ // Propagate hidden -> output
+ for (c = 0; c < layer1_size; c++) f += neu1[c] * syn1_window[c + l2 + window_offset];
+ if (f <= -MAX_EXP) continue;
+ else if (f >= MAX_EXP) continue;
+ else f = expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
+ // 'g' is the gradient multiplied by the learning rate
+ g = (1 - vocab[last_word].code[d] - f) * alpha;
+ // Propagate errors output -> hidden
+ for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1_window[c + l2 + window_offset];
+ // Learn weights hidden -> output
+ for (c = 0; c < layer1_size; c++) syn1_window[c + l2 + window_offset] += g * neu1[c]; // syn1_window, matching the propagate step; syn1 is too small for l2
+ }
+ // NEGATIVE SAMPLING
+ if (negative > 0) for (d = 0; d < negative + 1; d++) {
+ if (d == 0) {
+ target = last_word;
+ label = 1;
+ } else {
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ if(word_to_group != NULL && word_to_group[last_word] != -1){
+ target = last_word;
+ while(target == last_word) {
+ target = group_to_table[word_to_group[last_word]*table_size + (next_random >> 16) % table_size];
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ }
+ else{
+ target = table[(next_random >> 16) % table_size];
+ }
+ if (target == 0) target = next_random % (vocab_size - 1) + 1;
+ if (target == last_word) continue;
+ label = 0;
+ }
+ l2 = target * window_layer_size;
+ f = 0;
+ for (c = 0; c < layer1_size; c++) f += neu1[c] * syn1neg_window[c + l2 + window_offset];
+ if (f > MAX_EXP) g = (label - 1) * alpha;
+ else if (f < -MAX_EXP) g = (label - 0) * alpha;
+ else g = (label - expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]) * alpha;
+ acc_g+=g;
+
+ for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1neg_window[c + l2 + window_offset];
+ for (c = 0; c < layer1_size; c++) syn1neg_window[c + l2 + window_offset] += g * neu1[c];
+
+ }
+
+ }
+ // Learn weights input -> hidden
+
+ if(rep == 1){
+ lstmBackward(c_word, strlen(c_word),neu1, f_states, b_states, chars, neu1e,f_states_e, b_states_e, chars_e, lstm_params_e);
+ }
+ else if(rep == 2){
+ g = 0;
+ l1 = word * layer1_size;
+ for (c = 0; c < layer1_size; c++) {
+ syn0[c + l1] += neu1e[c];
+ f = syn0[c + l1] - syn0_initial[c + l1];
+ if(f > 0){
+ g+=f;
+ }
+ else{
+ g-=f;
+ }
+ }
+ syn0_in_memory[word] = g;
+ if(global_divergence == -1){global_divergence = g;}
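+ // Re-fit the LSTM to the drifted vector with probability roughly
+ // (log(cn) + 1) / cn, i.e. about log(cn)+1 times per pass over the corpus,
+ // so frequent words are not re-fit at every occurrence.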
+ long skip_prob = vocab[word].cn-(log(vocab[word].cn)+1);
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+
+ if(skip_prob < next_random%vocab[word].cn){
+ non_skip++;
+ if(in_mem == 0){
+ lstmFitting(c_word, strlen(c_word), neu1, f_states, b_states, chars, &syn0[l1], neu1e, f_states_e, b_states_e, chars_e, lstm_params_e);
+ }
+ else{
+ lstmBackward(c_word, strlen(c_word),neu1, f_states, b_states, chars, neu1e,f_states_e, b_states_e, chars_e, lstm_params_e);
+ }
+ syn0_in_memory[word]=-1;
+ }
+ else{
+ skip++;
+ }
+ global_divergence = global_divergence*0.9 + g*0.1;
+ }
+ else{
+ l1 = word * layer1_size;
+ for (c = 0; c < layer1_size; c++) syn0[c + l1] += neu1e[c];
+ }
+ }
+ else if(type == 4){ //training senna
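+ // SENNA-style window network (in the spirit of Collobert et al.): the
+ // context vectors are concatenated into neu1, projected through a
+ // window_hidden_size hidden layer (syn_window_hidden) with a hardTanh
+ // non-linearity, and scored against per-word output vectors
+ // (syn_hidden_word / syn_hidden_word_neg).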
+ // in -> hidden
+ cw = 0;
+ for (a = 0; a < window * 2 + 1; a++) if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0) continue;
+ if (c >= sentence_length) continue;
+ last_word = sen[c];
+ if (last_word == -1) continue;
+ window_offset = a*layer1_size;
+ if (a > window) window_offset-=layer1_size;
+ for (c = 0; c < layer1_size; c++) neu1[c+window_offset] += syn0[c + last_word * layer1_size];
+ cw++;
+ }
+ if (cw) {
+ for (a = 0; a < window_hidden_size; a++){
+ c = a*window_layer_size;
+ for(b = 0; b < window_layer_size; b++){
+ neu2[a] += syn_window_hidden[c + b] * neu1[b];
+ }
+ }
+ if (hs) for (d = 0; d < vocab[word].codelen; d++) {
+ f = 0;
+ l2 = vocab[word].point[d] * window_hidden_size;
+ // Propagate hidden -> output
+ for (c = 0; c < window_hidden_size; c++) f += hardTanh(neu2[c]) * syn_hidden_word[c + l2];
+ if (f <= -MAX_EXP) continue;
+ else if (f >= MAX_EXP) continue;
+ else f = expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
+ // 'g' is the gradient multiplied by the learning rate
+ g = (1 - vocab[word].code[d] - f) * alpha;
+ // Propagate errors output -> hidden
+ for (c = 0; c < window_hidden_size; c++) neu2e[c] += dHardTanh(neu2[c],g) * g * syn_hidden_word[c + l2];
+ // Learn weights hidden -> output
+ for (c = 0; c < window_hidden_size; c++) syn_hidden_word[c + l2] += dHardTanh(neu2[c],g) * g * neu2[c];
+ }
+ // NEGATIVE SAMPLING
+ if (negative > 0) for (d = 0; d < negative + 1; d++) {
+ if (d == 0) {
+ target = word;
+ label = 1;
+ } else {
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ if(word_to_group != NULL && word_to_group[word] != -1){
+ target = word;
+ while(target == word) {
+ target = group_to_table[word_to_group[word]*table_size + (next_random >> 16) % table_size];
+ next_random = next_random * (unsigned long long)25214903917 + 11;
+ }
+ //printf("negative sampling %lld for word %s returned %s\n", d, vocab[word].word, vocab[target].word);
+ }
+ else{
+ target = table[(next_random >> 16) % table_size];
+ }
+ if (target == 0) target = next_random % (vocab_size - 1) + 1;
+ if (target == word) continue;
+ label = 0;
+ }
+ l2 = target * window_hidden_size;
+ f = 0;
+ for (c = 0; c < window_hidden_size; c++) f += hardTanh(neu2[c]) * syn_hidden_word_neg[c + l2];
+ if (f > MAX_EXP) g = (label - 1) * alpha / negative;
+ else if (f < -MAX_EXP) g = (label - 0) * alpha / negative;
+ else g = (label - expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]) * alpha / negative;
+ for (c = 0; c < window_hidden_size; c++) neu2e[c] += dHardTanh(neu2[c],g) * g * syn_hidden_word_neg[c + l2];
+ for (c = 0; c < window_hidden_size; c++) syn_hidden_word_neg[c + l2] += dHardTanh(neu2[c],g) * g * neu2[c];
+ }
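+ // Backpropagate through the hidden layer: neu1e = W^T * neu2e, then update
+ // W by the outer product of neu2e and neu1 (the learning rate is already
+ // folded into neu2e).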
+ for (a = 0; a < window_hidden_size; a++)
+ for(b = 0; b < window_layer_size; b++)
+ neu1e[b] += neu2e[a] * syn_window_hidden[a*window_layer_size + b];
+ for (a = 0; a < window_hidden_size; a++)
+ for(b = 0; b < window_layer_size; b++)
+ syn_window_hidden[a*window_layer_size + b] += neu2e[a] * neu1[b];
+ // hidden -> in
+ for (a = 0; a < window * 2 + 1; a++) if (a != window) {
+ c = sentence_position - window + a;
+ if (c < 0) continue;
+ if (c >= sentence_length) continue;
+ last_word = sen[c];
+ if (last_word == -1) continue;
+ window_offset = a * layer1_size;
+ if(a > window) window_offset -= layer1_size;
+ for (c = 0; c < layer1_size; c++) syn0[c + last_word * layer1_size] += neu1e[c + window_offset];
+ }
+ }
+ }
+ else{
+ printf("unknown type %i", type);
+ exit(0);
+ }
+ sentence_position++;
+ if (sentence_position >= sentence_length) {
+ sentence_length = 0;
+ continue;
+ }
+ }
+ fclose(fi);
+ free(neu1);
+ free(neu1e);
+ pthread_exit(NULL);
+}
+
+void TrainModel() {
+ long a, b, c, d;
+ FILE *fo;
+ pthread_t *pt = (pthread_t *)malloc(num_threads * sizeof(pthread_t));
+ printf("Starting training using file %s\n", train_file);
+ starting_alpha = alpha;
+ if (read_vocab_file[0] != 0) ReadVocab(); else LearnVocabFromTrainFile();
+ if (save_vocab_file[0] != 0) SaveVocab();
+ if (output_file[0] == 0) return;
+ InitNet();
+ if (negative > 0) InitUnigramTable();
+ if (negative_classes_file[0] != 0) InitClassUnigramTable();
+ start = clock();
+ for (a = 0; a < num_threads; a++) pthread_create(&pt[a], NULL, TrainModelThread, (void *)a);
+ for (a = 0; a < num_threads; a++) pthread_join(pt[a], NULL);
+ fo = fopen(output_file, "wb");
+ if (classes == 0) {
+ // Save the word vectors
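+ // For character representations (rep 1 or 2) each word vector is re-derived
+ // by running the LSTM over the word's characters rather than read back from
+ // syn0.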
+ real *f_states = (real *)calloc((c_state_size * 7) * (MAX_STRING + 1), sizeof(real));
+ real *b_states = (real *)calloc((c_state_size * 7) * (MAX_STRING + 1), sizeof(real));
+ real *chars = (real *)calloc(c_proj_size * MAX_STRING, sizeof(real));
+ real *neu1 = (real *)calloc(layer1_size * MAX_STRING, sizeof(real));
+
+ fprintf(fo, "%lld %lld\n", vocab_size, layer1_size);
+ for (a = 0; a < vocab_size; a++) {
+ fprintf(fo, "%s ", vocab[a].word);
+ if(rep == 1 || rep == 2){
+ for (b = 0; b < layer1_size; b++) {neu1[b]=0;}
+ lstmForward(vocab[a].word, strlen(vocab[a].word),neu1, f_states,b_states,chars);
+ if (binary) for (b = 0; b < layer1_size; b++) fwrite(&neu1[b], sizeof(real), 1, fo);
+ else for (b = 0; b < layer1_size; b++) fprintf(fo, "%lf ", neu1[b]);
+ }
+ else{
+ if (binary) for (b = 0; b < layer1_size; b++) fwrite(&syn0[a * layer1_size + b], sizeof(real), 1, fo);
+ else for (b = 0; b < layer1_size; b++) fprintf(fo, "%lf ", syn0[a * layer1_size + b]);
+ }
+ fprintf(fo, "\n");
+ }
+ } else {
+ // Run K-means on the word vectors
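+ // 10 rounds of cosine k-means: average the members of each class,
+ // L2-normalize the centroid, then reassign every word to the centroid with
+ // the largest dot product. (The local `iter` shadows the global training
+ // iteration count.)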
+ int clcn = classes, iter = 10, closeid;
+ int *centcn = (int *)malloc(classes * sizeof(int));
+ int *cl = (int *)calloc(vocab_size, sizeof(int));
+ real closev, x;
+ real *cent = (real *)calloc(classes * layer1_size, sizeof(real));
+ for (a = 0; a < vocab_size; a++) cl[a] = a % clcn;
+ for (a = 0; a < iter; a++) {
+ for (b = 0; b < clcn * layer1_size; b++) cent[b] = 0;
+ for (b = 0; b < clcn; b++) centcn[b] = 1;
+ for (c = 0; c < vocab_size; c++) {
+ for (d = 0; d < layer1_size; d++) cent[layer1_size * cl[c] + d] += syn0[c * layer1_size + d];
+ centcn[cl[c]]++;
+ }
+ for (b = 0; b < clcn; b++) {
+ closev = 0;
+ for (c = 0; c < layer1_size; c++) {
+ cent[layer1_size * b + c] /= centcn[b];
+ closev += cent[layer1_size * b + c] * cent[layer1_size * b + c];
+ }
+ closev = sqrt(closev);
+ for (c = 0; c < layer1_size; c++) cent[layer1_size * b + c] /= closev;
+ }
+ for (c = 0; c < vocab_size; c++) {
+ closev = -10;
+ closeid = 0;
+ for (d = 0; d < clcn; d++) {
+ x = 0;
+ for (b = 0; b < layer1_size; b++) x += cent[layer1_size * d + b] * syn0[c * layer1_size + b];
+ if (x > closev) {
+ closev = x;
+ closeid = d;
+ }
+ }
+ cl[c] = closeid;
+ }
+ }
+ // Save the K-means classes
+ for (a = 0; a < vocab_size; a++) fprintf(fo, "%s %d\n", vocab[a].word, cl[a]);
+ free(centcn);
+ free(cent);
+ free(cl);
+ }
+ fclose(fo);
+}
+
+int ArgPos(char *str, int argc, char **argv) {
+ int a;
+ for (a = 1; a < argc; a++) if (!strcmp(str, argv[a])) {
+ if (a == argc - 1) {
+ printf("Argument missing for %s\n", str);
+ exit(1);
+ }
+ return a;
+ }
+ return -1;
+}
+
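+ // Example invocation exercising the character options (binary name assumed
+ // to be cngram2vec):
+ //   ./cngram2vec -train data.txt -output vec.txt -size 200 -window 5 \
+ //     -negative 5 -type 3 -rep 2 -char-state-dim 50 -char-proj-dim 50 -iter 3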
+int main(int argc, char **argv) {
+ int i;
+ if (argc == 1) {
+ printf("WORD VECTOR estimation toolkit v 0.1c\n\n");
+ printf("Options:\n");
+ printf("Parameters for training:\n");
+ printf("\t-train <file>\n");
+ printf("\t\tUse text data from <file> to train the model\n");
+ printf("\t-output <file>\n");
+ printf("\t\tUse <file> to save the resulting word vectors / word clusters\n");
+ printf("\t-size <int>\n");
+ printf("\t\tSet size of word vectors; default is 100\n");
+ printf("\t-window <int>\n");
+ printf("\t\tSet max skip length between words; default is 5\n");
+ printf("\t-sample <float>\n");
+ printf("\t\tSet threshold for occurrence of words. Those that appear with higher frequency in the training data\n");
+ printf("\t\twill be randomly down-sampled; default is 1e-3, useful range is (0, 1e-5)\n");
+ printf("\t-hs <int>\n");
+ printf("\t\tUse Hierarchical Softmax; default is 0 (not used)\n");
+ printf("\t-negative <int>\n");
+ printf("\t-negative-classes <file>\n");
+ printf("\t\tNumber of negative examples; default is 5, common values are 3 - 10 (0 = not used)\n");
+ printf("\t-threads <int>\n");
+ printf("\t\tUse <int> threads (default 12)\n");
+ printf("\t-iter <int>\n");
+ printf("\t\tRun more training iterations (default 5)\n");
+ printf("\t-min-count <int>\n");
+ printf("\t\tThis will discard words that appear less than <int> times; default is 5\n");
+ printf("\t-alpha <float>\n");
+ printf("\t\tSet the starting learning rate; default is 0.025 for skip-gram and 0.05 for CBOW\n");
+ printf("\t-classes <int>\n");
+ printf("\t\tOutput word classes rather than word vectors; default number of classes is 0 (vectors are written)\n");
+ printf("\t-debug <int>\n");
+ printf("\t\tSet the debug mode (default = 2 = more info during training)\n");
+ printf("\t-binary <int>\n");
+ printf("\t\tSave the resulting vectors in binary moded; default is 0 (off)\n");
+ printf("\t-save-vocab <file>\n");
+ printf("\t\tThe vocabulary will be saved to <file>\n");
+ printf("\t-read-vocab <file>\n");
+ printf("\t\tThe vocabulary will be read from <file>, not constructed from the training data\n");
+ printf("\t-type <int>\n");
+ printf("\t\tType of embeddings (0 for cbow, 1 for skipngram, 2 for cwindow, 3 for structured skipngram, 4 for senna type)\n");
+ printf("\t-rep <int>\n");
+ printf("\t\tType of word rep (0 for word, 1 for character, 2 for character with short term memory\n");
+ printf("\t-char-state-dim <int>\n");
+ printf("\t\tcharacter state size\n");
+ printf("\t-char-proj-dim <int>\n");
+ printf("\t\tcharacter projection size\n");
+ printf("\nExamples:\n");
+ printf("./word2vec -train data.txt -output vec.txt -size 200 -window 5 -sample 1e-4 -negative 5 -hs 0 -binary 0 -type 1 -iter 3\n\n");
+ return 0;
+ }
+ output_file[0] = 0;
+ save_vocab_file[0] = 0;
+ read_vocab_file[0] = 0;
+ negative_classes_file[0] = 0;
+ if ((i = ArgPos((char *)"-size", argc, argv)) > 0) layer1_size = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-train", argc, argv)) > 0) strcpy(train_file, argv[i + 1]);
+ if ((i = ArgPos((char *)"-save-vocab", argc, argv)) > 0) strcpy(save_vocab_file, argv[i + 1]);
+ if ((i = ArgPos((char *)"-read-vocab", argc, argv)) > 0) strcpy(read_vocab_file, argv[i + 1]);
+ if ((i = ArgPos((char *)"-debug", argc, argv)) > 0) debug_mode = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-binary", argc, argv)) > 0) binary = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-type", argc, argv)) > 0) type = atoi(argv[i + 1]);
+ if (type==0 || type==2 || type==4) alpha = 0.05;
+ if ((i = ArgPos((char *)"-alpha", argc, argv)) > 0) alpha = atof(argv[i + 1]);
+ if ((i = ArgPos((char *)"-output", argc, argv)) > 0) strcpy(output_file, argv[i + 1]);
+ if ((i = ArgPos((char *)"-window", argc, argv)) > 0) window = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-sample", argc, argv)) > 0) sample = atof(argv[i + 1]);
+ if ((i = ArgPos((char *)"-hs", argc, argv)) > 0) hs = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-negative", argc, argv)) > 0) negative = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-negative-classes", argc, argv)) > 0) strcpy(negative_classes_file, argv[i + 1]);
+ if ((i = ArgPos((char *)"-threads", argc, argv)) > 0) num_threads = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-iter", argc, argv)) > 0) iter = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-min-count", argc, argv)) > 0) min_count = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-classes", argc, argv)) > 0) classes = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-rep", argc, argv)) > 0) rep = atoi(argv[i + 1]);
+ if ((i = ArgPos((char *)"-char-state-dim", argc, argv)) > 0) {c_state_size = atoi(argv[i + 1]); c_cell_size = c_state_size;}
+ if ((i = ArgPos((char *)"-char-proj-dim", argc, argv)) > 0) {c_proj_size = atoi(argv[i + 1]);}
+ vocab = (struct vocab_word *)calloc(vocab_max_size, sizeof(struct vocab_word));
+ vocab_hash = (int *)calloc(vocab_hash_size, sizeof(int));
+ expTable = (real *)malloc((EXP_TABLE_SIZE + 1) * sizeof(real));
+ tanhTable = (real *)malloc((EXP_TABLE_SIZE + 1) * sizeof(real));
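+ // Bucket i of both tables corresponds to x = (2i / EXP_TABLE_SIZE - 1) * MAX_EXP,
+ // covering [-MAX_EXP, MAX_EXP]; lookups invert this mapping.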
+ for (i = 0; i < EXP_TABLE_SIZE; i++) {
+ expTable[i] = exp((i / (real)EXP_TABLE_SIZE * 2 - 1) * MAX_EXP); // Precompute the exp() table
+ expTable[i] = expTable[i] / (expTable[i] + 1); // Turn it into the sigmoid: f(x) = e^x / (e^x + 1)
+ tanhTable[i] = tanh((i / (real)EXP_TABLE_SIZE * 2 - 1) * MAX_EXP);
+ }
+ TrainModel();
+ return 0;
+}