Skip to content

Commit 82189a4

Browse files
fix: tensorflow, matplotlib, nltk package versions and twitter APIs
1 parent 729cfe9 commit 82189a4

8 files changed

Lines changed: 25 additions & 46 deletions

File tree

constants.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
1-
consumer_key= 'E0pFYVai9VaOhqLiRBEC6gpGF'
2-
consumer_secret= 'XAMh4l9XL5nwFK3MN5tAjtXA2YgDN1tw5f7L2n6dz5ib8VYlbm'
1+
consumer_key= '7x7w8Ti8GmMtuhvj7IXBuvZmP'
2+
consumer_secret= 'kpFGcKiGxP9dJtlQzZ99p2OK9HM0IjA1I8n23N40VJyctOzvkG'
33

4-
access_token='3261604734-86c7DOJP98GwNeFWzvgPQKFUTyHn1ZFwlloJP3v'
5-
access_token_secret='eXEmlEAdxaFjueVP03jsAWeOeNMkI7ToiDQkyvLDa6eX7'
4+
access_token='1365544389498474497-97lzyZE9lroLNDzLhvW1dLS3MDKfV5'
5+
access_token_secret='8mF9CrBt4SvmewPZaapgL7kkh2iJus824SvhBJOvDpXlw'
66

77
num_of_tweets = int(300)

main.py

Lines changed: 14 additions & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -6,9 +6,9 @@
66
"""
77
#**************** IMPORT PACKAGES ********************
88
from flask import Flask, render_template, request, flash, redirect, url_for
9+
from alpha_vantage.timeseries import TimeSeries
910
import pandas as pd
1011
import numpy as np
11-
from pandas import datetime
1212
from statsmodels.tsa.arima_model import ARIMA
1313
from sklearn.metrics import mean_squared_error
1414
import matplotlib.pyplot as plt
@@ -24,8 +24,15 @@
2424
from textblob import TextBlob
2525
import constants as ct
2626
from Tweet import Tweet
27+
import nltk
28+
nltk.download('punkt')
29+
30+
# Ignore Warnings
2731
import warnings
2832
warnings.filterwarnings("ignore")
33+
import os
34+
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
35+
2936
#***************** FLASK *****************************
3037
app = Flask(__name__)
3138

@@ -53,7 +60,6 @@ def get_historical(quote):
5360
df = pd.DataFrame(data=data)
5461
df.to_csv(''+quote+'.csv')
5562
if(df.empty):
56-
from alpha_vantage.timeseries import TimeSeries
5763
ts = TimeSeries(key='N6A6QT6IBFJOPJ70',output_format='pandas')
5864
data, meta_data = ts.get_daily_adjusted(symbol='NSE:'+quote, outputsize='full')
5965
#Format df
@@ -86,13 +92,11 @@ def arima_model(train, test):
8692
for t in range(len(test)):
8793
model = ARIMA(history, order=(6,1 ,0))
8894
model_fit = model.fit(disp=0)
89-
#print(model_fit.summary())
9095
output = model_fit.forecast()
9196
yhat = output[0]
9297
predictions.append(yhat[0])
9398
obs = test[t]
9499
history.append(obs)
95-
#print('predicted=%f, expected=%f' % (yhat, obs))
96100
return predictions
97101
for company in uniqueVals[:10]:
98102
data=(df.loc[company,:]).reset_index()
@@ -102,34 +106,24 @@ def arima_model(train, test):
102106
Quantity_date['Price'] = Quantity_date['Price'].map(lambda x: float(x))
103107
Quantity_date = Quantity_date.fillna(Quantity_date.bfill())
104108
Quantity_date = Quantity_date.drop(['Date'],axis =1)
105-
#autocorrelation_plot(Quantity_date)
106-
#print(company)
107-
#plt.show()
108-
print()
109-
#print("Recent Trends in ",quote," Stock Prices: ")
110-
print()
111109
fig = plt.figure(figsize=(7.2,4.8),dpi=65)
112110
plt.plot(Quantity_date)
113111
plt.savefig('static/Trends.png')
114112
plt.close(fig)
115-
#plt.show()
116-
113+
117114
quantity = Quantity_date.values
118115
size = int(len(quantity) * 0.80)
119116
train, test = quantity[0:size], quantity[size:len(quantity)]
120117
#fit in model
121118
predictions = arima_model(train, test)
122119

123120
#plot graph
124-
print()
125-
#print("ARIMA model Accuracy: ")
126121
fig = plt.figure(figsize=(7.2,4.8),dpi=65)
127122
plt.plot(test,label='Actual Price')
128123
plt.plot(predictions,label='Predicted Price')
129124
plt.legend(loc=4)
130125
plt.savefig('static/ARIMA.png')
131126
plt.close(fig)
132-
#plt.show()
133127
print()
134128
print("##############################################################################")
135129
arima_pred=predictions[-2]
@@ -138,7 +132,6 @@ def arima_model(train, test):
138132
error_arima = math.sqrt(mean_squared_error(test, predictions))
139133
print("ARIMA RMSE:",error_arima)
140134
print("##############################################################################")
141-
print()
142135
return arima_pred, error_arima
143136

144137

@@ -194,19 +187,19 @@ def LSTM_ALGO(df):
194187
#units=no. of neurons in layer
195188
#input_shape=(timesteps,no. of cols/features)
196189
#return_seq=True for sending recc memory. For last layer, retrun_seq=False since end of the line
197-
regressor.add(Dropout(p=0.1))
190+
regressor.add(Dropout(0.1))
198191

199192
#Add 2nd LSTM layer
200193
regressor.add(LSTM(units=50,return_sequences=True))
201-
regressor.add(Dropout(p=0.1))
194+
regressor.add(Dropout(0.1))
202195

203196
#Add 3rd LSTM layer
204197
regressor.add(LSTM(units=50,return_sequences=True))
205-
regressor.add(Dropout(p=0.1))
198+
regressor.add(Dropout(0.1))
206199

207200
#Add 4th LSTM layer
208201
regressor.add(LSTM(units=50))
209-
regressor.add(Dropout(p=0.1))
202+
regressor.add(Dropout(0.1))
210203

211204
#Add o/p layer
212205
regressor.add(Dense(units=1))
@@ -271,7 +264,6 @@ def LSTM_ALGO(df):
271264
print("Tomorrow's ",quote," Closing Price Prediction by LSTM: ",lstm_pred)
272265
print("LSTM RMSE:",error_lstm)
273266
print("##############################################################################")
274-
print()
275267
return lstm_pred,error_lstm
276268
#***************** LINEAR REGRESSION SECTION ******************
277269
def LIN_REG_ALGO(df):
@@ -334,7 +326,6 @@ def LIN_REG_ALGO(df):
334326
print("Tomorrow's ",quote," Closing Price Prediction by Linear Regression: ",lr_pred)
335327
print("Linear Regression RMSE:",error_lr)
336328
print("##############################################################################")
337-
print()
338329
return df, lr_pred, forecast_set, mean, error_lr
339330
#**************** SENTIMENT ANALYSIS **************************
340331
def retrieving_tweets_polarity(symbol):
@@ -395,7 +386,6 @@ def retrieving_tweets_polarity(symbol):
395386
print("##############################################################################")
396387
print("Positive Tweets :",pos,"Negative Tweets :",neg,"Neutral Tweets :",neutral)
397388
print("##############################################################################")
398-
print()
399389
labels=['Positive','Negative','Neutral']
400390
sizes = [pos,neg,neutral]
401391
explode = (0, 0, 0)
@@ -413,37 +403,31 @@ def retrieving_tweets_polarity(symbol):
413403
print("##############################################################################")
414404
print("Tweets Polarity: Overall Positive")
415405
print("##############################################################################")
416-
print()
417406
tw_pol="Overall Positive"
418407
else:
419408
print()
420409
print("##############################################################################")
421410
print("Tweets Polarity: Overall Negative")
422411
print("##############################################################################")
423-
print()
424412
tw_pol="Overall Negative"
425413
return global_polarity,tw_list,tw_pol,pos,neg,neutral
426414

427415

428416
def recommending(df, global_polarity,today_stock,mean):
429417
if today_stock.iloc[-1]['Close'] < mean:
430418
if global_polarity > 0:
431-
print()
432-
433419
idea="RISE"
434420
decision="BUY"
435421
print()
436422
print("##############################################################################")
437423
print("According to the ML Predictions and Sentiment Analysis of Tweets, a",idea,"in",quote,"stock is expected => ",decision)
438424
elif global_polarity < 0:
439-
print()
440425
idea="FALL"
441426
decision="SELL"
442427
print()
443428
print("##############################################################################")
444429
print("According to the ML Predictions and Sentiment Analysis of Tweets, a",idea,"in",quote,"stock is expected => ",decision)
445430
else:
446-
print()
447431
idea="FALL"
448432
decision="SELL"
449433
print()
@@ -466,13 +450,11 @@ def recommending(df, global_polarity,today_stock,mean):
466450

467451
#************** PREPROCESSUNG ***********************
468452
df = pd.read_csv(''+quote+'.csv')
469-
print()
470453
print("##############################################################################")
471454
print("Today's",quote,"Stock Data: ")
472455
today_stock=df.iloc[-1:]
473456
print(today_stock)
474457
print("##############################################################################")
475-
print()
476458
df = df.dropna()
477459
code_list=[]
478460
for i in range(0,len(df)):
@@ -485,13 +467,10 @@ def recommending(df, global_polarity,today_stock,mean):
485467
arima_pred, error_arima=ARIMA_ALGO(df)
486468
lstm_pred, error_lstm=LSTM_ALGO(df)
487469
df, lr_pred, forecast_set,mean,error_lr=LIN_REG_ALGO(df)
488-
print()
489-
#print("Recent %s related Tweets & News: " % quote)
490470
polarity,tw_list,tw_pol,pos,neg,neutral = retrieving_tweets_polarity(quote)
491471

492-
print()
493-
#print("Generating recommendation based on prediction & polarity...")
494472
idea, decision=recommending(df, polarity,today_stock,mean)
473+
print()
495474
print("Forecasted Prices for Next 7 days:")
496475
print(forecast_set)
497476
today_stock=today_stock.round(2)

requirements.txt

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,16 +1,16 @@
1+
tensorflow
12
nltk
2-
keras==2.3.1
3+
keras
34
numpy
4-
streamlit==0.52.1
5+
streamlit
56
seaborn
67
tweepy
78
textblob
89
flask
910
pandas
10-
matplotlib
11+
matplotlib==3.2
1112
scikit_learn
12-
statsmodels==0.10.1
13-
yfinance==0.1.54
14-
alpha_vantage==2.1.3
15-
tensorflow==1.14.0
13+
statsmodels
14+
yfinance
15+
alpha_vantage
1616
https://pypi.anaconda.org/berber/simple/tweet-preprocessor/0.5.0/tweet-preprocessor-0.5.0.tar.gz

static/ARIMA.png

-118 Bytes
Loading

static/LR.png

-3.29 KB
Loading

static/LSTM.png

922 Bytes
Loading

static/SA.png

22 Bytes
Loading

static/Trends.png

-3.56 KB
Loading

0 commit comments

Comments
 (0)