#!/usr/local/bin/perl
use Inline C;
use Mojolicious::Lite;
use Mojo::JSON qw(decode_json encode_json to_json);
use Encode qw(decode encode);
use Getopt::Std;
use Mojo::Server::Daemon;
plugin 'Log::Access';
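# Command-line options (see getopts below):
#   -i       treat input/output as Latin-1 instead of UTF-8
#   -l HOST  listen address (default: all interfaces)
#   -p PORT  listen port (default: 5676)
#   -n FILE  optional word2vec net file with the full output layer,
#            required for the syntagmatic (collocator) results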
our $opt_i = 0; # latin1-input?
our $opt_l = undef;
our $opt_p = 5676;
our $opt_n = undef;
getopts('il:p:n:'); # getopts (not getopt): -i is a boolean flag, the others take arguments
# -cbow 1 -size 200 -window 8 -negative 25 -hs 0 -sample 1e-4 -threads 40 -binary 1 -iter 15
init_net($ARGV[0] ? $ARGV[0] : "vectors15.bin", $opt_n, ($opt_i ? 1 : 0));
my $daemon = Mojo::Server::Daemon->new(
app => app,
listen => ['http://'.($opt_l ? $opt_l : '*').":$opt_p"]
);
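# The single route. Query parameters: word (one or more words; '|' separates
# independent queries, ' ' searches around the average vector, '-' subtracts),
# n (max. neighbours), N (max. t-SNE iterations), perplexity, epsilon,
# som (draw a self-organising map). Hypothetical example, assuming the
# default port: http://localhost:5676/?word=Grund&n=100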
get '/' => sub {
my $c = shift;
my $word=$c->param('word');
my $no_nbs=$c->param('n') || 100;
my $no_iterations=$c->param('N') || 2000;
my $perplexity=$c->param('perplexity') || 20;
my $epsilon=$c->param('epsilon') || 5;
my $som=$c->param('som') || 0;
my $res;
my @lists;
my @collocations;
if(defined($word) && $word !~ /^\s*$/) {
$c->inactivity_timeout(300);
$word =~ s/\s+/ /g;
for my $w (split(' *\| *', $word)) {
$c->app->log->debug('Looking for neighbours of '.$w);
if($opt_i) {
$res = get_neighbours(encode("iso-8859-1", $w), $no_nbs);
} else {
$res = get_neighbours($w, $no_nbs);
}
push(@lists, $res->{paradigmatic});
}
$word =~ s/ *\| */ | /g;
}
$word = '' unless defined $word;
$c->render(template => "index", word => $word, no_nbs => $no_nbs,
no_iterations => $no_iterations, epsilon => $epsilon, perplexity => $perplexity,
show_som => $som, lists => \@lists,
collocators => (($res && $res->{syntagmatic}) ? $res->{syntagmatic} : []));
};
$daemon->run; # app->start;
exit;
__END__
__C__
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <fcntl.h>   /* open, O_RDONLY */
#include <unistd.h>  /* close, lseek */
#include <sys/mman.h>
#include <pthread.h>
#define max_size 2000
#define max_w 50
#define MAX_NEIGHBOURS 1000
#define MAX_WORDS -1
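/* MAX_WORDS < 0 means: do not truncate the vocabulary (see init_net) */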
#define MAX_THREADS 100
#define MAX_CC 50
#define EXP_TABLE_SIZE 1000
#define MAX_EXP 6
//the thread function
void *connection_handler(void *);
typedef struct {
long long *index;
float *dist;
float *norm;
long long *pos;
unsigned int length;
} knn;
typedef struct {
long long wordi[MAX_NEIGHBOURS];
char sep[MAX_NEIGHBOURS];
int length;
} wordlist;
typedef struct {
wordlist *wl;
char *token;
int N;
long from;
unsigned long upto;
} knnpars;
float *M, *syn1neg_window, *expTable;
char *vocab;
long long words, size;
int num_threads=20;
int latin_enc=0;
int window;
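/*
 * init_net: load the word2vec model given in file_name. On first use the
 * original .bin format is converted into two flat files (.vecs with the
 * normalised vectors, .words with the vocabulary) that can simply be
 * mmapped on subsequent starts. If net_name is given, the complete network
 * including the position-wise output layer (syn1neg_window) is mmapped
 * instead and the window size is derived from the file size. Finally a
 * sigmoid lookup table (expTable) is precomputed.
 */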
int init_net(char *file_name, char *net_name, int latin) {
FILE *f, *binvecs, *binwords;
int binwords_fd, binvecs_fd, net_fd, i;
long long a, b, c, d, cn;
float len;
char binvecs_fname[256], binwords_fname[256];
strcpy(binwords_fname, file_name);
strcat(binwords_fname, ".words");
strcpy(binvecs_fname, file_name);
strcat(binvecs_fname, ".vecs");
latin_enc = latin;
f = fopen(file_name, "rb");
if (f == NULL) {
printf("Input file %s not found\n", file_name);
return -1;
}
fscanf(f, "%lld", &words);
if(MAX_WORDS > 0 && words > MAX_WORDS) words = MAX_WORDS;
fscanf(f, "%lld", &size);
if( (binvecs_fd = open(binvecs_fname, O_RDONLY)) < 0 || (binwords_fd = open(binwords_fname, O_RDONLY)) < 0) {
printf("Converting %s to memory mappable structures\n", file_name);
vocab = (char *)malloc((long long)words * max_w * sizeof(char));
M = (float *)malloc((long long)words * (long long)size * sizeof(float));
if (M == NULL) {
printf("Cannot allocate memory: %lld MB %lld %lld\n", (long long)words * size * sizeof(float) / 1048576, words, size);
return -1;
}
for (b = 0; b < words; b++) {
a = 0;
while (1) {
vocab[b * max_w + a] = fgetc(f);
if (feof(f) || (vocab[b * max_w + a] == ' ')) break;
if ((a < max_w - 1) && (vocab[b * max_w + a] != '\n')) a++; /* leave room for the terminating 0 */
}
vocab[b * max_w + a] = 0;
fread(&M[b * size], sizeof(float), size, f);
len = 0;
for (a = 0; a < size; a++) len += M[a + b * size] * M[a + b * size];
len = sqrt(len);
for (a = 0; a < size; a++) M[a + b * size] /= len;
}
if( (binvecs = fopen(binvecs_fname, "wb")) != NULL && (binwords = fopen(binwords_fname, "wb")) != NULL) {
fwrite(M, sizeof(float), (long long)words * (long long)size, binvecs);
fclose(binvecs);
fwrite(vocab, sizeof(char), (long long)words * max_w, binwords);
fclose(binwords);
}
}
if( (binvecs_fd = open(binvecs_fname, O_RDONLY)) >= 0 && (binwords_fd = open(binwords_fname, O_RDONLY)) >= 0) {
M = mmap(0, sizeof(float) * (long long)words * (long long)size, PROT_READ, MAP_SHARED, binvecs_fd, 0);
vocab = mmap(0, sizeof(char) * (long long)words * max_w, PROT_READ, MAP_SHARED, binwords_fd, 0);
if (M == MAP_FAILED || vocab == MAP_FAILED) {
close(binvecs_fd);
close(binwords_fd);
fprintf(stderr, "Cannot mmap %s or %s\n", binwords_fname, binvecs_fname);
exit(-1);
}
} else {
fprintf(stderr, "Cannot open %s or %s\n", binwords_fname, binvecs_fname);
exit(-1);
}
fclose(f);
if(net_name) {
if( (net_fd = open(net_name, O_RDONLY)) >= 0) {
window = (lseek(net_fd, 0, SEEK_END) - sizeof(float) * words * size) / words / size / sizeof(float) / 2;
// lseek(net_fd, sizeof(float) * words * size, SEEK_SET);
munmap(M, sizeof(float) * words * size);
M = mmap(0, sizeof(float) * words * size + sizeof(float) * 2 * window * size * words, PROT_READ, MAP_SHARED, net_fd, 0);
if (M == MAP_FAILED) {
close(net_fd);
fprintf(stderr, "Cannot mmap %s\n", net_name);
exit(-1);
}
syn1neg_window = M + words * size;
} else {
fprintf(stderr, "Cannot open %s\n", net_name);
exit(-1);
}
fprintf(stderr, "Successfully memmaped %s. Determined window size: %d\n", net_name, window);
}
expTable = (float *) malloc((EXP_TABLE_SIZE + 1) * sizeof(float));
for (i = 0; i < EXP_TABLE_SIZE; i++) {
expTable[i] = exp((i / (float) EXP_TABLE_SIZE * 2 - 1) * MAX_EXP); // Precompute the exp() table
expTable[i] = expTable[i] / (expTable[i] + 1); // Precompute f(x) = x / (x + 1)
}
return 0;
}
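/*
 * getCollocators: find the N strongest collocators of the word with
 * vocabulary index cc. For every position in the context window, the dot
 * product between the word's input vector and the position-specific output
 * weights (syn1neg_window) of each candidate is pushed through the
 * precomputed sigmoid, and the top-N activations are kept together with
 * their window positions. target_sums effectively accumulates, per
 * candidate, a noisy-OR-style score over all window positions.
 */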
knn *getCollocators(int cc, int N) {
knn *nbs = NULL;
long window_layer_size = size * window * 2;
long a, b, c, d, e, window_offset, target, max_target=0, maxmax_target;
float f, max_f, maxmax_f;
float *target_sums, *bestf, *bestn, worstbest, wpos_sum;
long long *besti, *bestp;
if(cc == -1 || window < 1) /* unknown word, or no net file loaded */
return NULL;
a = posix_memalign((void **) &target_sums, 128, words * sizeof(float));
besti = malloc(N * sizeof(long long));
bestp = malloc(N * sizeof(long long));
bestf = malloc(N * sizeof(float));
bestn = malloc(N * sizeof(float));
for (b = 0; b < words; b++)
target_sums[b]=0;
for (b = 0; b < N; b++) {
besti[b] = d; /* initialise all slots so unfilled entries stay valid indices */
bestp[b] = 0;
bestn[b] = 1;
bestf[b] = -1;
}
worstbest = -1;
d = cc;
maxmax_f = -1;
maxmax_target = 0;
besti[0]=d;
bestf[0]=1.0;
bestp[0]=0;
for (a = window * 2; a >= 0; a--) { /* positions 0..2*window; a == window is the target itself */
wpos_sum = 0;
printf("window pos: %ld\n", a);
if (a != window) {
max_f = -1;
window_offset = a * size;
if (a > window)
window_offset -= size;
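/* Presumably only the most frequent half of the (frequency-sorted)
   vocabulary is considered as collocator candidates. */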
for(target = 0; target < words / 2; target ++) {
if(target == d)
continue;
f = 0;
for (c = 0; c < size; c++)
f += M[d* size + c] * syn1neg_window[target * window_layer_size + window_offset + c];
if (f < -MAX_EXP || f > MAX_EXP)
continue; /* outside the precomputed sigmoid range */
f = expTable[(int) ((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]; /* sigmoid via lookup table */
wpos_sum += f;
if(f > max_f) {
max_f = f;
max_target = target;
}
target_sums[target] += (1-target_sums[target]) * f;
if(f > worstbest) {
for (b = 0; b < N; b++) {
if (f > bestf[b]) {
for (e = N - 1; e > b; e--) {
bestf[e] = bestf[e - 1];
besti[e] = besti[e - 1];
bestp[e] = bestp[e - 1];
}
bestf[b] = f;
besti[b] = target;
bestp[b] = window-a;
break;
}
}
worstbest = bestf[N-1];
}
}
printf("%d %.2f\n", max_target, max_f);
printf("%s (%.2f) ", &vocab[max_target * max_w], max_f);
if(max_f > maxmax_f) {
maxmax_f = max_f;
maxmax_target = max_target;
}
for (b = 0; b < N; b++)
if(bestp[b] == window-a)
bestn[b] = bestf[b] / wpos_sum;
} else {
printf("\x1b[1m%s\x1b[0m ", &vocab[d*max_w]);
}
}
max_f = -1;
for (b = 0; b < words; b++) {
if(target_sums[b] > max_f) {
max_f = target_sums[b];
max_target = b;
}
}
printf(" -- max sum: %s (%.2f), max resp.: \x1b[1m%s\x1b[0m (%.2f)\n",
&vocab[max_target * max_w], max_f,
&vocab[maxmax_target * max_w], maxmax_f);
for(b=0; b<N; b++)
printf("%-32s %.2f %d\n", &vocab[besti[b]*max_w], bestf[b], bestp[b]);
printf("\n");
free(target_sums);
nbs = malloc(sizeof(knn));
nbs->index = besti;
nbs->dist = bestf;
nbs->norm = bestn;
nbs->pos = bestp;
nbs->length = N;
return(nbs);
}
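/*
 * getTargetWords: split the query string on ' ' and '-' into single words,
 * look each of them up in the vocabulary and record the separators, so that
 * '-' can later subtract a word's vector instead of adding it.
 */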
wordlist *getTargetWords(char *st1) {
wordlist *wl = malloc(sizeof(wordlist));
char st[100][max_size];
long a, b=0, c=0, cn=0;
while (1) {
st[cn][b] = st1[c];
b++;
c++;
st[cn][b] = 0;
if (st1[c] == 0) break;
if (st1[c] == ' ' || st1[c] == '-') {
wl->sep[cn++] = st1[c]; /* store into the wordlist, where _get_neighbours reads it */
b = 0;
c++;
}
}
cn++;
for (a = 0; a < cn; a++) {
for (b = 0; b < words; b++) if (!strcmp(&vocab[b * max_w], st[a])) break;
if (b == words) b = -1;
wl->wordi[a] = b;
fprintf(stderr, "Word: \"%s\" Position in vocabulary: %lld\n", st[a], wl->wordi[a]);
if (b == -1) {
fprintf(stderr, "Out of dictionary word!\n");
cn--;
break;
}
}
wl->length=cn;
return(wl);
}
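/*
 * _get_neighbours: thread worker. Builds the normalised sum resp. difference
 * vector of the query words and scans the vocabulary slice [from, upto) for
 * the N nearest neighbours by dot product (cosine, since all vectors are
 * normalised). A negative 'from' marks the dedicated collocator thread,
 * which delegates to getCollocators instead.
 */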
void *_get_neighbours(knnpars *pars) {
char *st1 = pars->token;
int N = pars->N;
long from = pars -> from;
unsigned long upto = pars -> upto;
float dist, len, *bestd, vec[max_size];
long long a, b, c, d, cn, *bi, *besti;
char *sep;
knn *nbs = NULL;
wordlist *wl = pars->wl;
besti = malloc(N * sizeof(long long));
bestd = malloc(N * sizeof(float));
float worstbest=-1;
for (a = 0; a < N; a++) bestd[a] = 0;
a = 0;
bi = wl->wordi;
cn = wl->length;
sep = wl->sep;
b = bi[0];
c = 0;
if(from < 0) {
nbs = getCollocators(b, pars->N);
pthread_exit(nbs);
}
if (b == -1) {
N = 0;
goto end;
}
for (a = 0; a < size; a++) vec[a] = 0;
for (b = 0; b < cn; b++) {
if (bi[b] == -1) continue;
if(b>0 && sep[b-1] == '-')
for (a = 0; a < size; a++) vec[a] -= M[a + bi[b] * size];
else
for (a = 0; a < size; a++) vec[a] += M[a + bi[b] * size];
}
len = 0;
for (a = 0; a < size; a++) len += vec[a] * vec[a];
len = sqrt(len);
for (a = 0; a < size; a++) vec[a] /= len;
for (a = 0; a < N; a++) bestd[a] = -1;
for (c = from; c < upto; c++) {
a = 0;
// do not skip target word
// for (b = 0; b < cn; b++) if (bi[b] == c) a = 1;
// if (a == 1) continue;
dist = 0;
for (a = 0; a < size; a++) dist += vec[a] * M[a + c * size];
if(dist > worstbest) {
for (a = 0; a < N; a++) {
if (dist > bestd[a]) {
for (d = N - 1; d > a; d--) {
bestd[d] = bestd[d - 1];
besti[d] = besti[d - 1];
}
bestd[a] = dist;
besti[a] = c;
break;
}
}
worstbest = bestd[N-1];
}
}
nbs = malloc(sizeof(knn));
nbs->index = besti;
nbs->dist = bestd;
nbs->length = N;
end:
pthread_exit(nbs);
}
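/*
 * get_neighbours: entry point called from Perl via Inline::C. Starts
 * num_threads worker threads over disjoint vocabulary slices plus one
 * collocator thread, merges the per-slice top-N lists and returns a hash
 * reference with the "paradigmatic" (nearest neighbours) and "syntagmatic"
 * (collocators) result lists.
 */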
SV *get_neighbours(char *st1, int N) {
HV *result = newHV();
float bestd[MAX_NEIGHBOURS], bestn[MAX_NEIGHBOURS], vec[max_size];
long long besti[MAX_NEIGHBOURS], bestp[MAX_NEIGHBOURS], a, b, c, d, slice;
char *bestw[MAX_NEIGHBOURS];
knn *nbs[MAX_THREADS];
knnpars pars[MAX_THREADS];
pthread_t *pt = (pthread_t *)malloc((num_threads+1) * sizeof(pthread_t));
wordlist *wl;
if(N>MAX_NEIGHBOURS) N=MAX_NEIGHBOURS;
slice = words / num_threads;
wl = getTargetWords(st1);
a = num_threads;
pars[a].token = st1;
pars[a].wl = wl;
pars[a].N = N;
pars[a].from = -1;
pthread_create(&pt[a], NULL, (void *(*)(void *)) _get_neighbours, (void *) &pars[a]);
for(a=0; a < num_threads; a++) {
pars[a].token = st1;
pars[a].wl = wl;
pars[a].N = N;
pars[a].from = a*slice;
pars[a].upto = ((a+1)*slice > words? words:(a+1)*slice);
pthread_create(&pt[a], NULL, (void *(*)(void *)) _get_neighbours, (void *) &pars[a]);
}
for (a = 0; a < num_threads; a++) pthread_join(pt[a], (void **) &nbs[a]);
pthread_join(pt[a], (void **) &nbs[a]); /* the collocator thread (a == num_threads here) */
if(!nbs[0])
goto end;
for(b=0; b < N; b++) {
besti[b] = nbs[0]->index[b];
bestd[b] = nbs[0]->dist[b];
}
for(a=1; a < num_threads; a++) {
for(b=0; b < N; b++) {
for(c=0; c < N; c++) {
if(nbs[a]->dist[b] > bestd[c]) {
for(d=N-1; d>c; d--) {
bestd[d] = bestd[d-1];
besti[d] = besti[d-1];
}
besti[c] = nbs[a]->index[b];
bestd[c] = nbs[a]->dist[b];
break;
}
}
}
}
if(nbs[0]) {
AV* array = newAV();
for (a = 0; a < N; a++) {
bestw[a] = (char *)malloc(max_size * sizeof(char));
}
for (a = 0; a < N; a++) {
strcpy(bestw[a], &vocab[besti[a] * max_w]);
HV* hash = newHV();
SV* word = newSVpv(bestw[a], 0); /* newSVpvf would treat the word as a format string */
if(latin_enc == 0) SvUTF8_on(word);
hv_store(hash, "word", strlen("word"), word , 0);
hv_store(hash, "dist", strlen("dist"), newSVnv(bestd[a]), 0);
hv_store(hash, "rank", strlen("rank"), newSVuv(besti[a]), 0);
AV *vector = newAV();
for (b = 0; b < size; b++) {
av_push(vector, newSVnv(M[b + besti[a] * size]));
}
hv_store(hash, "vector", strlen("vector"), newRV_noinc((SV*)vector), 0);
av_push(array, newRV_noinc((SV*)hash));
}
hv_store(result, "paradigmatic", strlen("paradigmatic"), newRV_noinc((SV*)array), 0);
for(b=0; b < nbs[num_threads]->length; b++) {
besti[b] = nbs[num_threads]->index[b];
bestd[b] = nbs[num_threads]->dist[b];
bestn[b] = nbs[num_threads]->norm[b];
bestp[b] = nbs[num_threads]->pos[b];
}
array = newAV();
for (a = 0; a < nbs[num_threads]->length; a++) {
strcpy(bestw[a], &vocab[besti[a] * max_w]);
HV* hash = newHV();
SV* word = newSVpv(bestw[a], 0);
if(latin_enc == 0) SvUTF8_on(word);
hv_store(hash, "word", strlen("word"), word , 0);
hv_store(hash, "dist", strlen("dist"), newSVnv(bestd[a]), 0);
hv_store(hash, "norm", strlen("norm"), newSVnv(bestn[a]), 0);
hv_store(hash, "pos", strlen("pos"), newSVnv(bestp[a]), 0);
av_push(array, newRV_noinc((SV*)hash));
}
hv_store(result, "syntagmatic", strlen("syntagmatic"), newRV_noinc((SV*)array), 0);
}
end:
return newRV_noinc((SV*)result);
}
__DATA__
@@ index.html.ep
<!DOCTYPE html>
<html>
<head>
<title>DeReKo-Word-Vector-Distances</title>
<link rel="stylesheet" href="//code.jquery.com/ui/1.11.4/themes/smoothness/jquery-ui.css">
<script src="http://code.jquery.com/jquery-latest.min.js"></script>
<script src="//code.jquery.com/ui/1.11.4/jquery-ui.js"></script>
<script>
$(function() {
$( document ).tooltip({
content: function() {
return $(this).attr('title');
}}
)
})
</script>
<script src="//d3js.org/d3.v3.min.js" charset="utf-8"></script>
<script src="http://klinux10/word2vec/tsne.js"></script>
<script src="http://klinux10/word2vec/som.js"></script>
<script src="http://klinux10/word2vec/labeler.js"></script>
<style>
body, input {
font-family: Arial, sans-serif;
font-size: 11pt;
}
.ui-tooltip-content {
font-size: 9pt;
color: #222222;
}
svg > .ui-tooltip-content {
font-size: 8pt;
color: #222222;
}
#collocators {
margin-bottom: 15px;
}
#wrapper {
width: 100%;
/* border: 1px solid red; */
overflow: hidden; /* will contain if #first is longer than #second */
}
#first {
margin-right: 20px;
float: left;
/* border: 1px solid green; */
}
#second {
border: 1px solid #333;
overflow: hidden; /* if you don't want #second to wrap below #first */
}
#som2 svg {
border: 1px solid #333;
}
#cost {
font-size: 8pt;
color: #222222;
margin-top: 4px;
margin-bottom: 12px;
}
#sominfo1, #sominfo {
font-size: 8pt;
color: #222222;
margin-top: 0px;
}
#somcolor1, #somcolor2, #somcolor3 {
display: inline-block;
height: 10px;
width: 10px;
}
#third {
border: 1px solid #333;
}
</style>
<script>
var opt = {epsilon: <%= $epsilon %>, perplexity: <%= $perplexity %>},
mapWidth = 800, // map width
mapHeight = 800,
jitterRadius = 7;
var T = new tsnejs.tSNE(opt); // create a tSNE instance
var Y;
var data;
var labeler;
function applyJitter() {
svg.selectAll('.tsnet')
.data(labels)
.transition()
.duration(50)
.attr("transform", function(d, i) {
T.Y[i][0] = (d.x - mapWidth/2 - tx)/ss/20;
T.Y[i][1] = (d.y - mapHeight/2 - ty)/ss/20;
return "translate(" +
(d.x) + "," +
(d.y) + ")";
});
}
function updateEmbedding() {
var Y = T.getSolution();
svg.selectAll('.tsnet')
.data(data.words)
.attr("transform", function(d, i) {
return "translate(" +
((Y[i][0]*20*ss + tx) + mapWidth/2) + "," +
((Y[i][1]*20*ss + ty) + mapHeight/2) + ")"; });
}
var svg;
var labels = [];
var anchor_array = [];
var text;
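// Draw the current t-SNE solution as an SVG scatter plot of word labels;
// every label links back to a query for that word, query words are red.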
function drawEmbedding() {
$("#embed").empty();
var div = d3.select("#embed");
// get min and max in each column of Y
var Y = T.Y;
svg = div.append("svg") // svg is global
.attr("width", mapWidth)
.attr("height", mapHeight);
var g = svg.selectAll(".b")
.data(data.words)
.enter().append("g")
.attr("class", "tsnet");
g.append("a")
.attr("xlink:href", function(word) {return "/?word="+word;})
.attr("title", function(d, i) {
return "rank: "+i +" "+"freq. rank: "+data.ranks[i].toString().replace(/\B(?=(\d{3})+(?!\d))/g, ",");
})
.append("text")
.attr("text-anchor", "top")
.attr("font-size", 12)
.attr("fill", function(d) {
if(data.target.indexOf(" "+d+" ") >= 0) {
return "red";
} else {
return "#333"
}
})
.text(function(d) { return d; });
var zoomListener = d3.behavior.zoom()
.scaleExtent([0.1, 10])
.center([0,0])
.on("zoom", zoomHandler);
zoomListener(svg);
}
var tx=0, ty=0;
var ss=1;
var iter_id=-1;
function zoomHandler() {
tx = d3.event.translate[0];
ty = d3.event.translate[1];
ss = d3.event.scale;
updateEmbedding();
}
var stepnum = 0;
function stopStep() {
clearInterval(iter_id);
text = svg.selectAll("text");
// jitter function needs different data and co-ordinate representation
labels = d3.range(data.words.length).map(function(i) {
var x = (T.Y[i][0]*20*ss + tx) + mapWidth/2;
var y = (T.Y[i][1]*20*ss + ty) + mapHeight/2;
anchor_array.push({x: x, y: y, r: jitterRadius});
return {
x: x,
y: y,
name: data.words[i]
};
});
// get the actual label bounding boxes for the jitter function
var index = 0;
text.each(function() {
labels[index].width = this.getBBox().width;
labels[index].height = this.getBBox().height;
index += 1;
});
// setTimeout(updateEmbedding, 1);
// setTimeout(
labeler = d3.labeler()
.label(labels)
.anchor(anchor_array)
.width(mapWidth)
.height(mapHeight)
.update(applyJitter);
// .start(1000);
iter_id = setInterval(jitterStep, 1);
}
var jitter_i=0;
function jitterStep() {
if(jitter_i++ > 100) {
clearInterval(iter_id);
} else {
labeler.start2(10);
applyJitter();
}
}
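// One t-SNE optimisation step per timer tick; stop early if the cost has
// not improved over the last 250 iterations, then start label jittering.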
var last_cost=1000;
function step() {
var i = T.iter;
if(i > <%= $no_iterations %>) {
stopStep();
} else {
var cost = Math.round(T.step() * 100000) / 100000; // do a few steps
$("#cost").html("tsne iteration " + i + ", cost: " + cost.toFixed(5));
if(i % 250 == 0 && cost >= last_cost) {
stopStep();
} else {
last_cost = cost;
updateEmbedding();
}
}
}
function showMap(j) {
data=j;
T.iter=0;
T.initDataRaw(data.vecs); // init embedding
drawEmbedding(); // draw initial embedding
if(iter_id >= 0) {
clearInterval(iter_id);
}
//T.debugGrad();
iter_id = setInterval(step, 1);
if(<%= $show_som %>) {
makeSOM(j, <%= $no_iterations %>);
}
}
</script>
</head>
<body>
<form action="<%=url_for('/')->to_abs%>" method="GET">
word(s):
<input type="text" name="word" size="20" value="<%= $word %>" title="When looking for multiple words use spaces as separators to search around the average vector and | as separator to get the neighbours for each word.">
max. neighbours: <input type="text" size="8" name="n" value="<%= $no_nbs %>">
max. iterations: <input type="text" name="N" size="8" value="<%= $no_iterations %>">
SOM <input type="checkbox" name="som" value="1" <%= ($show_som ? "checked" : "") %>>
<span> </span><input type="submit" value="Show">
</form>
<br>
% if(@$lists) {
<div id="wrapper">
<table id="first">
<tr>
<th align="right">#</th><th align="right">cos</th><th align="left">paradigmatic</th><th title="Position in winodw around target word. Absolute value can be too low because of sub-sampling frequent words.">@</th><th align="right" title="&#34;Responsivenes&#34; of the collocator at the relative position @. Approximation of the probability that the combination of the target word and the collocator at the relative position @ come from the corpus.">resp.</th><th title="Probability of the collocator at window location @."align="right">p(c<sub><small>@</small></sub>)</th><th align="left">syntagmatic</th>
</tr>
% my $j=0; my @words; my @vecs; my @ranks; for my $list (@$lists) {
% my $i=0; for my $item (@$list) {
% my $c = (@$collocators)[$i] || {};
% if(!grep{$_ eq $item->{word}} @words) {
% push @vecs, $item->{vector};
% push @words, $item->{word};
% push @ranks, $item->{rank};
% }
<tr>
<td align="right">
<%= ++$i %>.
</td>
<td align="right">
<%= sprintf("%.3f", $item->{dist}) %>
</td>
<td>
<a title="freq. rank: <%= $item->{rank} %>" href="/?word=<%= $item->{word} %>">
<%= $item->{word} %>
</a>
</td>
<td align="right">
<%= $c->{pos} %>:
</td>
<td align="right">
<%= sprintf("%.3f", $c->{dist}) %>
</td>
<td align="right">
<%= sprintf("%.3e", $c->{norm}) %>
</td>
<td align="left">
<a href="/?word=<%= $c->{word} %>">
<%= $c->{word} %>
</a>
</td>
</tr>
% }
% }
</table>
<script>
% use Mojo::ByteStream 'b';
$(window).load(function() {
showMap(<%= b(Mojo::JSON::to_json({target => " $word ", words => \@words, vecs => \@vecs, ranks => \@ranks})); %>);
});
</script>
% }
<div id="second" style="width:800px; height:800px; font-family: arial;">
<div id="embed">
</div>
</div>
<div id="cost"></div>
% if($show_som) {
<div id="som2">
</div>
<div id="sominfo1"><span id="somcolor1"> </span> <span id="somword1"> </span> <span id="somcolor2"> </span> <span id="somword2"> </span> <span id="somcolor3"> </span></div>
<div id="sominfo">SOM iteration <span id="iterations">0</span></div>
% }
</div>
<p>
Word vector model based on DeReKo-2015-II. Trained with <a href="https://code.google.com/p/word2vec/">word2vec</a> using the following parameters:
</p>
<pre>
-cbow 1 -size 300 -window 7 -negative 5 -hs 0 -sample 1e-5 -threads 44 -binary 1 -iter 5
</pre>
</body>
</html>