Save tweets with tweepy, feed them into word2vec, and play around with the result. I copied and pasted from various places, so there isn't much novelty here. One of the goals was to get 'Shota - Man + Woman = Loli'.
CentOS7 Anaconda2-4.1.0
tweepy MeCab mecab-python gensim
I haven't worried much about redundancy here and there.
TwStream.py
# -*- encoding:utf-8 -*-
import sys
import os
import re
import time
import tweepy
HERE = os.path.abspath(os.path.dirname(__file__))
CK = ''
CS = ''
AT = ''
AS = ''
class MyListener(tweepy.StreamListener):
	def __init__(self):
		super(MyListener, self).__init__()
	def on_status(self, status):
		try:
			tw = status.text.strip()
			# Japanese tweets only (contains hiragana or katakana)
			if re.search(u'[ぁ-んァ-ン]', tw) is not None:
				with open(HERE + '/stream.txt', 'a') as f:
					# Tweets never contain a tab, so a line with only a tab is used as the delimiter
					f.write(tw.encode('utf-8') + '\n\t\n')
				print tw.encode('utf-8')
		except tweepy.TweepError as e:
			print e.reason
			if  'u\'code\': 88' in e.reason:
				print 'wait 15 min'
				time.sleep(15*60)
	def on_error(self, status_code):
		print 'error ', status_code
		if status_code == 420:
			print 'wait 15 min'
			time.sleep(15*60)
		time.sleep(10)
	def on_limit(self, status):
		print 'limit'
		time.sleep(10)
	def on_timeout(self):
		print 'timeout'
		time.sleep(10)
if __name__ == '__main__':
	while True:
		try:
			auth = tweepy.OAuthHandler(CK, CS)
			auth.set_access_token(AT, AS)
			print 'auth set'
			st = tweepy.Stream(auth, MyListener())
			print 'sampling'
			st.sample()
		except tweepy.TweepError as e:
			st.disconnect()
			print e.reason
			if 'u\'code\': 88' in e.reason:
				print 'wait 15 min'
				time.sleep(15*60)
		except KeyboardInterrupt:
			st.disconnect()
			break
		except:
			st.disconnect()
			continue
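Running this for a while keeps appending tweets to stream.txt, separated by a line containing only a tab. As a quick sanity check, a small sketch like the one below (my own addition, not one of the scripts above) counts how many tweets have accumulated and how big the file is:
# -*- encoding:utf-8 -*-
# Rough check of how much TwStream.py has collected so far
import os
HERE = os.path.abspath(os.path.dirname(__file__))
PATH = HERE + '/stream.txt'
with open(PATH, 'r') as f:
	raw = f.read()
# Tweets are joined by a tab-only line, so splitting on it gives one item per tweet
tweets = [t for t in raw.split('\n\t\n') if t.strip()]
print 'tweets collected:', len(tweets)
print 'file size (MB):', os.path.getsize(PATH) / (1024.0 * 1024.0)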
Now rename 'stream.txt' to 'raw.txt'.
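If you prefer to do the rename from Python rather than the shell, something like this is enough (it assumes the files sit next to the script):
# -*- encoding:utf-8 -*-
import os
HERE = os.path.abspath(os.path.dirname(__file__))
os.rename(HERE + '/stream.txt', HERE + '/raw.txt')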
W2V.py
# -*- encoding:utf-8 -*-
import sys
import os
import MeCab
import gensim
from gensim.models import word2vec
#Path of this file
HERE = os.path.abspath(os.path.dirname(__file__))
sys.path.append(HERE)
#Self-made module
from MeCabRW import *
from ProcStr import *
if __name__ == '__main__':
    MODEL = HERE + '/twitter.model'
    try:
        #Load if there is a model
        print 'loading model'
        model = word2vec.Word2Vec.load(MODEL)
        print 'model loaded'
    except:
        #Create if not
        print 'model not loaded'
        print 'creating model'
        # mt = MeCab.Tagger('-Owakati') would also work here
        mt = mtWakatiNeo()
        avoid = ['RT']
        mecabParseRW(HERE + '/raw.txt', HERE + '/sep.txt', mt, avoid)
        #Read the word-separated data
        corp = word2vec.Text8Corpus(HERE + '/sep.txt')
        # Learn frequent word pairs so that multi-word phrases become single tokens
        phrcorp = gensim.models.Phrases(corp)
        model = word2vec.Word2Vec(phrcorp[corp], size=2000, min_count=2)
        model.save(MODEL)
        print 'creating done'
    pos = [u'ショタ', u'女']  # Shota, woman
    neg = [u'男']  # man
    sim = model.most_similar(positive=pos, negative=neg)
    print '+: ', ' '.join([i.encode('utf-8') for i in pos])
    print '-: ', ' '.join([i.encode('utf-8') for i in neg])
    print
    for i, j in sim:
        print i.encode('utf-8'), '\t', j
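The gensim Phrases step above is what glues frequent word pairs into a single token joined by '_', which is why a few of the results below end in '_'. Here is a minimal, self-contained sketch of that behaviour on a toy corpus (the sentences and thresholds are made up purely for illustration):
# -*- encoding:utf-8 -*-
# Toy illustration of gensim Phrases: frequent bigrams get merged into one '_'-joined token
from gensim.models import Phrases

# Hypothetical pre-tokenized corpus; repeated so the bigram 'new york' is frequent
sentences = [['new', 'york', 'is', 'big'],
             ['new', 'york', 'is', 'far'],
             ['tokyo', 'is', 'big']] * 5

phrases = Phrases(sentences, min_count=2, threshold=0.1)
print phrases[['new', 'york', 'is', 'big']]  # expect something like ['new_york', 'is', 'big']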
MeCabRW.py
# -*- coding: utf-8 -*-
import re
import MeCab
def mtWakatiNeo():
    opt = '-O wakati -d /usr/lib64/mecab/dic/mecab-ipadic-neologd'
    return MeCab.Tagger(opt)
def mecabParseRW(pathIn, pathOut, mt, avoid=[]):
    with open(pathIn, 'r') as f:
        sIn = f.read()
    # Remove URLs and @mentions
    sIn = re.sub('https?://[A-Za-z0-9/:%#\$&\?\(\)~\.=\+\-]+', ' ', sIn)
    sIn = re.sub('@[A-Za-z0-9_]+', ' ', sIn)
    sOut = []
    for i in sIn.split('\n\t\n'):
        if all([j not in i for j in avoid]):
            p = mt.parse(i)  # parse() occasionally returns None here
            if type(p) == str:  # type check to skip those cases
                try:
                    p.decode('utf-8')
                    sOut.append(p)
                except:
                    continue
    sOut = '\n\t\n'.join(sOut)
    with open(pathOut, 'w') as f:
        f.write(sOut)
    return sOut
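To make sure the NEologd dictionary path hard-coded in mtWakatiNeo() is right for your machine, it helps to run the tagger on one sentence by hand from the same directory (a minimal check, nothing more):
# -*- coding: utf-8 -*-
# Quick manual check of the wakati tagger and the NEologd dictionary path
from MeCabRW import mtWakatiNeo

mt = mtWakatiNeo()
# parse() returns the sentence with words separated by spaces (wakati-gaki)
print mt.parse('今日はいい天気ですね')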
After collecting roughly 60 MB of tweets, run it:
loading model
model loaded
+: Shota woman
-: Man
Monkey 0.833452105522
Macaron 0.832771897316
Loli 0.830695152283
Compliment 0.828270435333
Speaking_ 0.825944542885
Umehara 0.825801610947
Arisa 0.822319507599
Small breasts 0.818123817444
hundred_ 0.817329347134
Honda Tsubasa 0.816138386726
Not bad, right? The reverse formula gives much the same impression.
loading model
model loaded
+: Loli man
-: woman
Purple 0.847893893719
hundred_ 0.824845731258
Shota 0.82099032402
Do 0.81635427475
Tsumugi 0.813044965267
Princess 0.812274694443
Parody 0.809535622597
Mob 0.804774940014
White 0.802413225174
Black hair 0.800325155258
I really wanted to do this on Windows + Python 3, but ended up here because of character-encoding issues and the existing material I was working from. I normally use only Python 3, so some of this Python 2 code may look odd.
http://docs.tweepy.org/en/v3.5.0/streaming_how_to.html
https://radimrehurek.com/gensim/models/phrases.html#module-gensim.models.phrases
http://tjo.hatenablog.com/entry/2014/06/19/233949