blob: 0d6bff33df0ae25c4469566577175a64915bb997 [file] [log] [blame]
#!/usr/local/bin/perl
use strict;
use warnings;
use Inline C;
use Mojolicious::Lite;
use Mojo::JSON qw(decode_json encode_json to_json);
use Encode qw(decode encode);
use Mojo::Server::Daemon;
plugin 'Log::Access';
# Model selection: the model file may be passed as the second command-line
# argument (after the Mojolicious command, e.g. "daemon"); fall back to the
# bundled vectors15.bin.  Using one variable avoids printing an undefined
# value (and the resulting warning) when no argument is given.
# -cbow 1 -size 200 -window 8 -negative 25 -hs 0 -sample 1e-4 -threads 40 -binary 1 -iter 15
my $model_file = $ARGV[1] || "vectors15.bin";
print STDERR "Using model file: $model_file\n";
init_net($model_file);
# Main route: look up nearest neighbours for one or more query words.
# Words separated by "|" are searched independently (one result list each);
# words separated by blanks within one segment are averaged in the C layer.
get '/' => sub {
  my $c = shift;
  my $word          = $c->param('word');
  my $no_nbs        = $c->param('n') || 100;            # max. neighbours per list
  my $no_iterations = $c->param('N') || 2000;           # tSNE iteration cap
  my $perplexity    = $c->param('perplexity') || 20;    # tSNE perplexity
  my $epsilon       = $c->param('epsilon') || 5;        # tSNE learning rate
  my $som           = $c->param('som') || 0;            # also render a SOM?
  my @lists;
  if (defined($word) && $word !~ /^\s*$/) {
    # Scanning the whole vocabulary can take a while.
    $c->inactivity_timeout(300);
    $word =~ s/\s+/ /g;
    for my $w (split(' *\| *', $word)) {
      $c->app->log->debug('Looking for neighbours of ' . $w);
      # The C layer expects the model's single-byte encoding.
      push(@lists, get_neighbours(encode("iso-8859-1", $w), $no_nbs));
    }
    # Normalize the separator for display in the search box.  Doing this
    # inside the guard (unlike before) avoids an uninitialized-value
    # warning when no word was submitted.
    $word =~ s/ *\| */ | /g;
  }
  else {
    $word //= '';    # keep the template free of undef warnings
  }
  $c->render(template=>"index", word=>$word, no_nbs=>$no_nbs, no_iterations => $no_iterations, epsilon=> $epsilon, perplexity=> $perplexity, show_som=>$som, lists=> \@lists);
};
# Hand control to Mojolicious::Lite's command dispatcher (reads @ARGV,
# e.g. "daemon"); for server commands this blocks until shutdown.  The
# explicit exit afterwards appears to be a belt-and-braces guard — it is
# only reached once the dispatched command returns.
app->start;
exit;
__END__
__C__
#include <stdio.h>
#include <string.h>   /* strcmp, strcpy, strlen */
#include <math.h>     /* sqrt */
#include <malloc.h>
#include <stdlib.h>   /* malloc, free, exit */
#include <fcntl.h>    /* open, O_RDONLY — required by init_net */
#include <unistd.h>   /* close */
#include <sys/mman.h> /* mmap */
#include <pthread.h>

/* Maximum length of a query string and of the scratch buffers. */
#define max_size 2000
/* Fixed record width of a vocabulary entry (including terminator). */
#define max_w 50
/* Upper bound on the number of neighbours a query may request. */
#define MAX_NEIGHBOURS 1000
/* Cap on vocabulary size; non-positive means "no limit". */
#define MAX_WORDS -1
/* Upper bound on worker threads for the parallel kNN search. */
#define MAX_THREADS 100
/* NOTE(review): this forward declaration is stale — no connection_handler
   is defined in this file; the worker is _get_neighbours below. */
void *connection_handler(void *);
/* Result of one worker thread's k-nearest-neighbour search. */
typedef struct {
long long *index; /* vocabulary indices of the neighbours, best first */
float *dist; /* cosine similarity for each entry in index */
unsigned int length; /* number of valid entries in index/dist */
} knn;
/* Arguments handed to each _get_neighbours worker thread. */
typedef struct {
char *token; /* query word(s), blank-separated */
int N; /* number of neighbours requested */
unsigned long from; /* first vocabulary row this thread scans (inclusive) */
unsigned long upto; /* row after the last one this thread scans (exclusive) */
} knnpars;
/* Word vectors, words x size floats, L2-normalised by init_net (mmap'ed). */
float *M;
/* Vocabulary: fixed-width records of max_w bytes each (mmap'ed). */
char *vocab;
/* Vocabulary size and vector dimensionality, from the model header. */
long long words, size;
/* Number of worker threads used by get_neighbours. */
int num_threads=20;
/*
 * Load a word2vec binary model, L2-normalise every vector, and memory-map
 * the result into the globals M (vectors) and vocab (fixed-width words).
 *
 * On first use the model <file_name> is converted into two flat cache
 * files, <file_name>.vecs and <file_name>.words, which are then mmap'ed
 * read-only; words and size are filled from the model header.
 *
 * Returns 0 on success, -1 if the model cannot be read or parsed.
 * Exits the process if the cache files cannot be created or mapped
 * (the server is useless without them).
 */
int init_net(char *file_name) {
  FILE *f, *binvecs, *binwords;
  int binwords_fd, binvecs_fd;
  long long a, b;
  float len;
  char binvecs_fname[256], binwords_fname[256];
  /* snprintf instead of strcpy/strcat: guards the 256-byte name buffers
     against very long model paths. */
  snprintf(binwords_fname, sizeof(binwords_fname), "%s.words", file_name);
  snprintf(binvecs_fname, sizeof(binvecs_fname), "%s.vecs", file_name);
  f = fopen(file_name, "rb");
  if (f == NULL) {
    printf("Input file %s not found\n", file_name);
    return -1;
  }
  /* Model header: vocabulary size, then vector dimensionality. */
  if (fscanf(f, "%lld", &words) != 1) {
    printf("Cannot read word count from %s\n", file_name);
    fclose(f);
    return -1;
  }
  if (MAX_WORDS > 0 && words > MAX_WORDS) words = MAX_WORDS;
  if (fscanf(f, "%lld", &size) != 1) {
    printf("Cannot read vector size from %s\n", file_name);
    fclose(f);
    return -1;
  }
  if ((binvecs_fd = open(binvecs_fname, O_RDONLY)) < 0 || (binwords_fd = open(binwords_fname, O_RDONLY)) < 0) {
    printf("Converting %s to memory mappable structures\n", file_name);
    vocab = (char *)malloc((long long)words * max_w * sizeof(char));
    M = (float *)malloc((long long)words * (long long)size * sizeof(float));
    if (M == NULL || vocab == NULL) {
      printf("Cannot allocate memory: %lld MB %lld %lld\n", (long long)words * size * sizeof(float) / 1048576, words, size);
      fclose(f);
      return -1;
    }
    for (b = 0; b < words; b++) {
      /* Copy the word up to the separating blank; cap at max_w - 1 so the
         terminating 0 below can never spill into the next record. */
      a = 0;
      while (1) {
        vocab[b * max_w + a] = fgetc(f);
        if (feof(f) || (vocab[b * max_w + a] == ' ')) break;
        if ((a < max_w - 1) && (vocab[b * max_w + a] != '\n')) a++;
      }
      vocab[b * max_w + a] = 0;
      if (fread(&M[b * size], sizeof(float), size, f) != (size_t)size) {
        printf("Truncated vector data in %s\n", file_name);
        fclose(f);
        return -1;
      }
      /* L2-normalise so that a dot product equals cosine similarity. */
      len = 0;
      for (a = 0; a < size; a++) len += M[a + b * size] * M[a + b * size];
      len = sqrt(len);
      if (len != 0) for (a = 0; a < size; a++) M[a + b * size] /= len;
    }
    if ((binvecs = fopen(binvecs_fname, "wb")) != NULL && (binwords = fopen(binwords_fname, "wb")) != NULL) {
      fwrite(M, sizeof(float), (long long)words * (long long)size, binvecs);
      fclose(binvecs);
      fwrite(vocab, sizeof(char), (long long)words * max_w, binwords);
      fclose(binwords);
    }
    /* The heap copies are superseded by the mmap'ed cache files below;
       free them so they do not leak for the lifetime of the server. */
    free(M);
    free(vocab);
  }
  if ((binvecs_fd = open(binvecs_fname, O_RDONLY)) >= 0 && (binwords_fd = open(binwords_fname, O_RDONLY)) >= 0) {
    M = mmap(0, sizeof(float) * (long long)words * (long long)size, PROT_READ, MAP_SHARED, binvecs_fd, 0);
    vocab = mmap(0, sizeof(char) * (long long)words * max_w, PROT_READ, MAP_SHARED, binwords_fd, 0);
    if (M == MAP_FAILED || vocab == MAP_FAILED) {
      close(binvecs_fd);
      close(binwords_fd);
      fprintf(stderr, "Cannot mmap %s or %s\n", binwords_fname, binvecs_fname);
      exit(-1);
    }
  } else {
    fprintf(stderr, "Cannot open %s or %s\n", binwords_fname, binvecs_fname);
    exit(-1);
  }
  fclose(f);
  return 0;
}
/*
 * Worker thread: scan vocabulary rows [from, upto) for the N vectors
 * closest (by cosine similarity) to the mean vector of the query words.
 *
 * pars->token holds the query as blank-separated words.  If any word is
 * out of vocabulary the thread exits with NULL.  Otherwise it exits with
 * a heap-allocated knn whose index/dist arrays (also heap-allocated)
 * hold the N best rows in descending similarity; the caller owns and
 * must free all three allocations.
 */
void *_get_neighbours(knnpars *pars) {
  char *st1 = pars->token;
  int N = pars->N;
  unsigned long from = pars->from;
  unsigned long upto = pars->upto;
  char st[100][max_size];
  float dist, len, *bestd, vec[max_size];
  long long a, b, c, d, cn, bi[100];
  long long *besti;
  knn *nbs = NULL;
  besti = malloc(N * sizeof(long long));
  bestd = malloc(N * sizeof(float));
  if (besti == NULL || bestd == NULL) {
    free(besti);
    free(bestd);
    pthread_exit(NULL);
  }
  float worstbest = -1;
  /* Split st1 on blanks into st[0..cn-1].  Bounded so that neither the
     100-phrase limit nor the max_size buffers can overflow. */
  cn = 0;
  b = 0;
  c = 0;
  while (1) {
    st[cn][b] = st1[c];
    b++;
    c++;
    st[cn][b] = 0;
    if (st1[c] == 0) break;
    if (st1[c] == ' ' && cn < 99) {
      cn++;
      b = 0;
      c++;
    }
    if (b >= max_size - 1) break;
  }
  cn++;
  /* Look every query word up in the vocabulary. */
  for (a = 0; a < cn; a++) {
    for (b = 0; b < words; b++) if (!strcmp(&vocab[b * max_w], st[a])) break;
    if (b == words) b = -1;
    bi[a] = b;
    fprintf(stderr, "Word: \"%s\" Position in vocabulary: %lld\n", st[a], bi[a]);
    if (b == -1) {
      fprintf(stderr, "Out of dictionary word!\n");
      cn--;
      break;
    }
  }
  if (b == -1) {
    /* Unknown word: report "no result" without leaking the buffers
       (the original code leaked besti/bestd here). */
    free(besti);
    free(bestd);
    pthread_exit(NULL);
  }
  /* Mean of the query vectors, re-normalised to unit length. */
  for (a = 0; a < size; a++) vec[a] = 0;
  for (b = 0; b < cn; b++) {
    if (bi[b] == -1) continue;
    for (a = 0; a < size; a++) vec[a] += M[a + bi[b] * size];
  }
  len = 0;
  for (a = 0; a < size; a++) len += vec[a] * vec[a];
  len = sqrt(len);
  if (len != 0) for (a = 0; a < size; a++) vec[a] /= len;
  for (a = 0; a < N; a++) bestd[a] = -1;
  /* Sorted insertion into the top-N lists; worstbest short-circuits
     candidates that cannot make the list.  The query words themselves
     are deliberately NOT skipped. */
  for (c = from; c < upto; c++) {
    dist = 0;
    for (a = 0; a < size; a++) dist += vec[a] * M[a + c * size];
    if (dist > worstbest) {
      for (a = 0; a < N; a++) {
        if (dist > bestd[a]) {
          for (d = N - 1; d > a; d--) {
            bestd[d] = bestd[d - 1];
            besti[d] = besti[d - 1];
          }
          bestd[a] = dist;
          besti[a] = c;
          break;
        }
      }
      worstbest = bestd[N - 1];
    }
  }
  nbs = malloc(sizeof(knn));
  nbs->index = besti;
  nbs->dist = bestd;
  nbs->length = N;
  pthread_exit(nbs);
}
/*
 * Perl-visible entry point: find the N nearest neighbours of the
 * (blank-separated) query st1 by fanning the vocabulary scan out over
 * num_threads workers and merging their per-thread top-N lists.
 *
 * Returns a reference to an array of hashes, one per neighbour, with
 * keys "word", "dist" (cosine similarity), "rank" (vocabulary index)
 * and "vector" (the normalised embedding as an array of numbers).
 * Returns a reference to an empty array when the query contains an
 * out-of-vocabulary word.
 */
SV *get_neighbours(char *st1, int N) {
  float bestd[MAX_NEIGHBOURS];
  long long besti[MAX_NEIGHBOURS], a, b, c, d, slice;
  knn *nbs[MAX_THREADS];
  knnpars pars[MAX_THREADS];
  pthread_t *pt = (pthread_t *)malloc(num_threads * sizeof(pthread_t));
  AV *array = newAV();
  if (N > MAX_NEIGHBOURS) N = MAX_NEIGHBOURS;
  slice = words / num_threads;
  for (a = 0; a < num_threads; a++) {
    pars[a].token = st1;
    pars[a].N = N;
    pars[a].from = a * slice;
    /* The last thread takes the remainder rows so the whole vocabulary
       is covered even when words % num_threads != 0 (the original code
       silently skipped those rows). */
    pars[a].upto = (a == num_threads - 1 ? words : (a + 1) * slice);
    pthread_create(&pt[a], NULL, _get_neighbours, (void *)&pars[a]);
  }
  for (a = 0; a < num_threads; a++) pthread_join(pt[a], (void **)&nbs[a]);
  free(pt);
  if (!nbs[0])
    goto end;
  /* Seed the merged list with thread 0's results ... */
  for (b = 0; b < N; b++) {
    besti[b] = nbs[0]->index[b];
    bestd[b] = nbs[0]->dist[b];
  }
  /* ... then fold in the other threads' sorted lists by insertion. */
  for (a = 1; a < num_threads; a++) {
    if (!nbs[a]) continue;
    for (b = 0; b < N; b++) {
      for (c = 0; c < N; c++) {
        if (nbs[a]->dist[b] > bestd[c]) {
          for (d = N - 1; d > c; d--) {
            bestd[d] = bestd[d - 1];
            besti[d] = besti[d - 1];
          }
          besti[c] = nbs[a]->index[b];
          bestd[c] = nbs[a]->dist[b];
          break;
        }
      }
    }
  }
  for (a = 0; a < N; a++) {
    HV *hash = newHV();
    /* newSVpv, not newSVpvf: a vocabulary word must never be treated as
       a printf format string (words containing '%' crashed or leaked). */
    hv_store(hash, "word", strlen("word"), newSVpv(&vocab[besti[a] * max_w], 0), 0);
    hv_store(hash, "dist", strlen("dist"), newSVnv(bestd[a]), 0);
    hv_store(hash, "rank", strlen("rank"), newSVuv(besti[a]), 0);
    AV *vector = newAV();
    for (b = 0; b < size; b++) {
      av_push(vector, newSVnv(M[b + besti[a] * size]));
    }
    hv_store(hash, "vector", strlen("vector"), newRV_noinc((SV *)vector), 0);
    av_push(array, newRV_noinc((SV *)hash));
  }
end:
  /* Release the per-thread result buffers (previously leaked per request). */
  for (a = 0; a < num_threads; a++) {
    if (nbs[a]) {
      free(nbs[a]->index);
      free(nbs[a]->dist);
      free(nbs[a]);
    }
  }
  return newRV_noinc((SV *)array);
}
__DATA__
@@ index.html.ep
<!DOCTYPE html>
<html>
<head>
<title>DeReKo-Word-Vector-Distances</title>
<link rel="stylesheet" href="//code.jquery.com/ui/1.11.4/themes/smoothness/jquery-ui.css">
<script src="http://code.jquery.com/jquery-latest.min.js"></script>
<script src="//code.jquery.com/ui/1.11.4/jquery-ui.js"></script>
<script>
// On DOM ready: use each element's raw title attribute as the tooltip
// content, so the markup generated for the embedding links is rendered
// rather than escaped.
$(function() {
$( document ).tooltip({
content: function() {
return $(this).attr('title');
}}
)
})
</script>
<script src="//d3js.org/d3.v3.min.js" charset="utf-8"></script>
<script src="http://klinux10/word2vec/tsne.js"></script>
<script src="http://klinux10/word2vec/som.js"></script>
<script src="http://klinux10/word2vec/labeler.js"></script>
<style>
body, input {
font-family: Arial, sans-serif;
font-size: 11pt;
}
.ui-tooltip-content {
font-size: 9pt;
color: #222222; /* was "colour", which is not a CSS property */
}
svg > .ui-tooltip-content {
font-size: 8pt;
color: #222222; /* was "colour", which is not a CSS property */
}
#wrapper {
width: 100%;
/* border: 1px solid red; */
overflow: hidden; /* will contain if #first is longer than #second */
}
#first {
margin-right: 20px;
float: left;
/* border: 1px solid green; */
}
#second {
border: 1px solid #333;
overflow: hidden; /* if you don't want #second to wrap below #first */
}
#som2 svg {
border: 1px solid #333;
}
#cost {
font-size: 8pt;
color: #222222;
margin-top: 4px;
margin-bottom: 12px;
}
#sominfo1, #sominfo {
font-size: 8pt;
color: #222222;
margin-top: 0px;
}
#somcolor1, #somcolor2, #somcolor3 {
display: inline-block;
height: 10px;
width: 10px;
}
#third {
border: 1px solid #333;
}
</style>
<script>
var opt = {epsilon: <%= $epsilon %>, perplexity: <%= $perplexity %>},
mapWidth = 800, // width map
mapHeight = 800,
jitterRadius = 7;
var T = new tsnejs.tSNE(opt); // create a tSNE instance
var Y;
var data;
var labeler;
// Move each label to its jittered (de-overlapped) position and write the
// adjusted co-ordinates back into the tSNE solution T.Y, so later redraws
// start from the jittered layout.
function applyJitter() {
svg.selectAll('.tsnet')
.data(labels)
.transition()
.duration(50)
.attr("transform", function(d, i) {
// Inverse of the screen transform used in updateEmbedding().
T.Y[i][0] = (d.x - mapWidth/2 - tx)/ss/20;
T.Y[i][1] = (d.y - mapHeight/2 - ty)/ss/20;
return "translate(" +
(d.x) + "," +
(d.y) + ")";
});
}
// Redraw every word at its current tSNE position, applying the pan
// (tx, ty) and zoom scale (ss) maintained by zoomHandler().
function updateEmbedding() {
var Y = T.getSolution();
svg.selectAll('.tsnet')
.data(data.words)
.attr("transform", function(d, i) {
return "translate(" +
((Y[i][0]*20*ss + tx) + mapWidth/2) + "," +
((Y[i][1]*20*ss + ty) + mapHeight/2) + ")"; });
}
var svg;
var labels = [];
var anchor_array = [];
var text;
// (Re)build the SVG scatter plot of the current tSNE layout: one <g> per
// word, each wrapping a link that re-runs the search for that word.
// Query words (those contained in data.target) are highlighted in red.
function drawEmbedding() {
$("#embed").empty();
var div = d3.select("#embed");
// get min and max in each column of Y
var Y = T.Y;
svg = div.append("svg") // svg is global
.attr("width", mapWidth)
.attr("height", mapHeight);
var g = svg.selectAll(".b")
.data(data.words)
.enter().append("g")
.attr("class", "tsnet");
g.append("a")
.attr("xlink:href", function(word) {return "/?word="+word;})
.attr("title", function(d, i) {
// Tooltip: neighbour rank plus thousands-separated corpus frequency rank.
return "rank: "+i +" "+"freq. rank: "+data.ranks[i].toString().replace(/\B(?=(\d{3})+(?!\d))/g, ",");
})
.append("text")
.attr("text-anchor", "top")
.attr("font-size", 12)
.attr("fill", function(d) {
// data.target is the query padded with blanks, so this matches whole words only.
if(data.target.indexOf(" "+d+" ") >= 0) {
return "red";
} else {
return "#333"
}
})
.text(function(d) { return d; });
// Pan/zoom support; zoomHandler records the view state and redraws.
var zoomListener = d3.behavior.zoom()
.scaleExtent([0.1, 10])
.center([0,0])
.on("zoom", zoomHandler);
zoomListener(svg);
}
var tx=0, ty=0;
var ss=1;
var iter_id=-1;
// Record the current d3 pan/zoom state in the globals consumed by
// updateEmbedding() and stopStep(), then redraw at the new view.
function zoomHandler() {
tx = d3.event.translate[0];
ty = d3.event.translate[1];
ss = d3.event.scale;
updateEmbedding();
}
var stepnum = 0;
// Stop the tSNE iteration and hand over to the label-jitter phase:
// convert the current layout into the structures the d3 labeler expects,
// measure the rendered label bounding boxes, then drive jitterStep() on
// a timer (reusing the shared iter_id interval handle).
function stopStep() {
clearInterval(iter_id);
text = svg.selectAll("text");
// jitter function needs different data and co-ordinate representation
labels = d3.range(data.words.length).map(function(i) {
// Same screen transform as updateEmbedding().
var x = (T.Y[i][0]*20*ss + tx) + mapWidth/2;
var y = (T.Y[i][1]*20*ss + ty) + mapHeight/2;
anchor_array.push({x: x, y: y, r: jitterRadius});
return {
x: x,
y: y,
name: data.words[i]
};
});
// get the actual label bounding boxes for the jitter function
var index = 0;
text.each(function() {
labels[index].width = this.getBBox().width;
labels[index].height = this.getBBox().height;
index += 1;
});
// setTimeout(updateEmbedding, 1);
// setTimeout(
labeler = d3.labeler()
.label(labels)
.anchor(anchor_array)
.width(mapWidth)
.height(mapHeight)
.update(applyJitter);
// .start(1000);
iter_id = setInterval(jitterStep, 1);
}
var jitter_i=0;
// One label-jitter iteration; stops the timer after 100 iterations.
function jitterStep() {
if(jitter_i++ > 100) {
clearInterval(iter_id);
} else {
labeler.start2(10);
applyJitter();
}
}
var last_cost=1000;
// One tSNE optimisation step, driven by setInterval from showMap().
// Stops after the configured iteration cap, or early when the cost has
// not improved at a 250-iteration checkpoint; stopping switches to the
// label-jitter phase.
function step() {
var i = T.iter;
if(i > <%= $no_iterations %>) {
stopStep();
} else {
var cost = Math.round(T.step() * 100000) / 100000; // do a few steps
$("#cost").html("tsne iteration " + i + ", cost: " + cost.toFixed(5));
if(i % 250 == 0 && cost >= last_cost) {
stopStep();
} else {
last_cost = cost;
updateEmbedding();
}
}
}
// Entry point called with the search result JSON: initialise the tSNE
// embedding from the raw vectors, draw it, and (re)start the iteration
// timer.  Optionally also builds the SOM view (makeSOM comes from som.js).
function showMap(j) {
data=j;
T.iter=0;
T.initDataRaw(data.vecs); // init embedding
drawEmbedding(); // draw initial embedding
if(iter_id >= 0) {
clearInterval(iter_id);
}
//T.debugGrad();
iter_id = setInterval(step, 1);
if(<%= $show_som %>) {
makeSOM(j, <%= $no_iterations %>);
}
}
</script>
</head>
<body>
<form action="<%=url_for('/')->to_abs%>" method="GET">
word(s):
<input type="text" name="word" size="20" value="<%= $word %>" title="When looking for multiple words use spaces as separators to search around the average vector and | as separator to get the neighbours for each word.">
max. neighbours: <input type="text" size="8" name="n" value="<%= $no_nbs %>">
max. iterations: <input type="text" name="N" size="8" value="<%= $no_iterations %>">
SOM <input type="checkbox" name="som" value="1" <%= ($show_som ? "checked" : "") %>>
<span> </span><input type="submit" value="Show">
</form>
<br>
% # Render the result table/map only when there is at least one neighbour
% # list ($lists is always an arrayref, so test its contents, not the ref).
% if(@$lists) {
<div id="wrapper">
<table id="first">
<tr>
<th align="right">Pos.</th><th align="left">Word</th><th align="right">Cosine dist.</th><th>Freq. rank</th>
</tr>
% my $j=0; my @words; my @vecs; my @ranks; for my $list (@$lists) {
% my $i=1; for my $item (@$list) {
% if(!grep{$_ eq $item->{word}} @words) {
% push @vecs, $item->{vector};
% push @words, $item->{word};
% push @ranks, $item->{rank};
% }
<tr>
<td align="right">
<%= $i++ %>.
</td>
<td>
<a href="/?word=<%= $item->{word} %>">
<%= $item->{word} %>
</a>
</td>
<td align="right">
<%= sprintf("%.3f", $item->{dist}) %>
</td>
<td align="right">
<%= $item->{rank} %>
</td>
</tr>
% }
% }
</table>
<script>
% use Mojo::ByteStream 'b';
$(window).load(function() {
showMap(<%= b(Mojo::JSON::to_json({target => " $word ", words => \@words, vecs => \@vecs, ranks => \@ranks})); %>);
});
</script>
% }
<div id="second" style="width:800px; height:800px; font-family: arial;">
<div id="embed">
</div>
</div>
<div id="cost"></div>
% if($show_som) {
<div id="som2">
</div>
<div id="sominfo1"><span id="somcolor1"> </span> <span id="somword1"> </span> <span id="somcolor2"> </span> <span id="somword2"> </span> <span id="somcolor3"> </span></div>
<div id="sominfo">SOM iteration <span id="iterations">0</span></div>
% }
</div>
<p>
Word vector model based on DeReKo-2015-II. Trained with <a href="https://code.google.com/p/word2vec/">word2vec</a> using the following parameters:
</p>
<pre>
-cbow 1 -size 300 -window 7 -negative 5 -hs 0 -sample 1e-5 -threads 44 -binary 1 -iter 5
</pre>
</body>
</html>