By : user2173644
Date : October 20 2020, 08:10 AM

Does that help? Looks like you're almost there. You fit the model on the training data, and made sure to do the backtest on the test set, which is the right thing to do. Some things you want to be careful about though: don't set prefer = "low" in add.rule for enter signals; you'll never know in advance where the low was in real trading, so fill on the next bar instead. code :
rm(list=ls())
require(quantstrat)
require(PerformanceAnalytics)
set.seed(1234)
# Initial parameters for the quantstrat trading model
initDate <- "2007-01-01"
from <- "2017-01-01"
to <- "2018-12-01"
init_equity <- 1000
adjustment <- TRUE
.orderqty <- 10
.txnfees <- 10
currency('USD')
Sys.setenv(TZ = "UTC")
# Collect the data
symbols <- c('GOOG')
.data <- new.env()
getSymbols(symbols, from = from, to = to, src = "yahoo", adjust = TRUE, env = .data)
colnames(.data$GOOG) <- c("open", "high", "low", "close", "volume", "adjusted")
mdata <- .data$GOOG
# Dependent variable for the logistic regression: 1 on up days (close >= open)
mdata$direction <- with(mdata, ifelse(close >= open, 1, 0))
# Create two basic input variables
# NOTE(review): nFast/nSlow/nSig are MACD-style arguments; TTR::RSI itself uses
# n and maType -- confirm the intended indicator parameters
mdata$rsi <- RSI(mdata$close, nFast = 14, nSlow = 26, nSig = 9, maType = SMA)
mdata$momentum <- momentum(mdata$close, n = 12)
mdata <- mdata[complete.cases(mdata), ]
# k = -1 shifts the series backward so each row holds the NEXT bar's direction
mdata$direction_fwd <- lag.xts(mdata$direction, k = -1)
# Create a training and a test set (80/20 chronological split)
train_date <- nrow(mdata) * 0.8
train <- mdata[1:train_date, ]
test <- mdata[-c(1:train_date), ]
# Run a simple logistic regression and obtain predicted probabilities
lm.fit <- glm(direction_fwd ~ rsi + momentum, data = train, family = binomial)
summary(lm.fit)
pr.lm <- predict(lm.fit, test, type = "response")
test$pred_prob <- pr.lm
# Add our predictions to the TEST data: predict an up day if prob > 0.6
test$prediction <- ifelse(pr.lm > 0.6, 1, 0)
paste0("Accuracy: ", mean(test$direction_fwd == test$prediction, na.rm = TRUE))
# Simple way to run applyStrategy: make sure the data for the symbol is in a
# variable with its name, like so:
GOOG <- test
stock("GOOG", currency = "USD", multiplier = 1)
strategy.st <- portfolio.st <- account.st <- "LogisticRegressionStrategy"
rm.strat(strategy.st)
rm.strat(portfolio.st)
rm.strat(account.st)
initPortf(name = portfolio.st,
          symbols = symbols,
          initDate = initDate,
          currency = 'USD')
initAcct(name = account.st,
         portfolios = portfolio.st,
         initDate = initDate,
         currency = 'USD',
         initEq = init_equity)
initOrders(portfolio.st,
           symbols = symbols,
           initDate = initDate)
strategy(strategy.st, store = TRUE)
nMult_orderqty <- 2
addPosLimit(portfolio.st, symbol = "GOOG", timestamp = initDate, maxpos = nMult_orderqty * .orderqty)
# Buy when prob exceeds 0.6 for the first time, using cross = TRUE
add.signal(strategy = strategy.st,
           name = "sigThreshold",
           arguments = list(threshold = 0.6, column = "pred_prob", relationship = "gt", cross = TRUE),
           label = "longSig")
# Exit when prob drops below 0.5 for the first time
add.signal(strategy = strategy.st,
           name = "sigThreshold",
           arguments = list(threshold = 0.5, column = "pred_prob", relationship = "lt", cross = TRUE),
           label = "exitLongSig")
# Entry rule: enter long at the next bar's open when "longSig" fires,
# taking transaction fees into account
add.rule(strategy = strategy.st,
         name = "ruleSignal",
         arguments = list(sigcol = "longSig",
                          sigval = 1,
                          orderqty = .orderqty,
                          ordertype = "market",
                          orderside = "long",
                          osFUN = osMaxPos,
                          # Never know the low in advance. Use the open, as it is
                          # for the next day (be aware that the open price for bar
                          # data has its own problems too)
                          prefer = "Open",
                          TxnFees = .txnfees,
                          replace = FALSE),
         type = "enter",
         label = "EnterLONG")
# As soon as the logistic regression predicts a "0" we dump all our shares in GOOG
add.rule(strategy.st,
         name = "ruleSignal",
         arguments = list(sigcol = "exitLongSig",
                          sigval = 1,
                          ordertype = "market",
                          orderside = "long",
                          orderqty = "all",
                          TxnFees = .txnfees,
                          replace = TRUE),
         type = "exit",
         label = "Exit2SHORT")
applyStrategy(strategy.st, portfolios = portfolio.st)
updatePortf(portfolio.st)
updateAcct(account.st)
updateEndEq(account.st)
chart.Posn(portfolio.st, Symbol = "GOOG",
           TA = "add_SMA(n = 10, col = 2); add_SMA(n = 30, col = 4)")
Share :

Producing logistic curve for my logistic regression model
By : SunnyCee
Date : March 29 2020, 07:55 AM
The fix below resolves the issue. You have 2 continuous, non-categorical variables, so the logistic curve will be a 3D curve. I will offer you two ways for presentation: use the persp function to produce a real 3D smooth curve; or fix v at a number of values, then produce a number of 2D logistic curves (which you called "S"-shape curves). code :
# Grid of predictor values over which to evaluate the fitted model
press_grid <- seq(200, 480, by = 5)
v_grid <- seq(0.6, 1.5, by = 0.1)
# Full factorial combination of the two grids (press varies fastest)
newdat <- data.frame(press = rep(press_grid, times = length(v_grid)), v = rep(v_grid, each = length(press_grid)))
pred <- predict.glm(mylogit, newdata = newdat, type = "response")
# Reshape predictions into a press x v matrix for persp()
z <- matrix(pred, length(press_grid))
# 3D logistic surface: predicted probability as a function of both predictors
persp(press_grid, v_grid, z, xlab = "pressure", ylab = "velocity", zlab = "predicted probability", main = "logistic curve (3D)", theta = 30, phi = 20)
# Plot a 2D logistic curve over press_grid with v held fixed.
# model: a fitted glm with predictors `press` and `v`
# v: the fixed velocity value for this slice
# add: if TRUE, overlay the curve on the current plot; otherwise start a new one
# col: line colour
curve_2D_fix_v <- function(model, v = 1, press_grid = seq(200, 480, by = 5), add = FALSE, col = "black") {
  newdat <- data.frame(press = press_grid, v = v)
  pred <- predict.glm(model, newdat, type = "response")
  if (add) lines(press_grid, pred, col = col) else {
    plot(press_grid, pred, xlab = "pressure", ylab = "predicted probability", type = "l", col = col, main = "logistic curve (2D)")
    # Reference lines at probability 0, 0.5 and 1
    abline(h = c(0, 0.5, 1), lty = 2, col = col)
  }
}
# Draw one 2D slice per fixed velocity value, overlaying them on one plot.
# NOTE(review): the last call repeats v = 0.4 (already drawn in black) in
# yellow -- presumably v = 1.4 was intended; confirm against the original post.
curve_2D_fix_v(mylogit, v = 0.4, add = FALSE, col = "black")
curve_2D_fix_v(mylogit, v = 0.6, add = TRUE, col = "red")
curve_2D_fix_v(mylogit, v = 0.8, add = TRUE, col = "green")
curve_2D_fix_v(mylogit, v = 1, add = TRUE, col = "blue")
curve_2D_fix_v(mylogit, v = 1.2, add = TRUE, col = "cyan")
curve_2D_fix_v(mylogit, v = 0.4, add = TRUE, col = "yellow")
> summary(mylogit)
Coefficients:
Estimate Std. Error z value Pr(>|z|)
(Intercept) 8.08326 4.45463 1.815 0.0696 .
press 0.02575 0.01618 1.591 0.1115
v 0.15385 4.83824 0.032 0.9746

Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Simple Logistic Regression Error in Python
By : Ravinesh Prakash
Date : March 29 2020, 07:55 AM
I wish this helps those in need. Here is the line of code. I know the issue is that I only have a 1d array but I cannot figure out the code for casting it to a 2d array inline. code :
# Reshape the 1-D column values into a 2-D (len(data), 1) array inline
data[col_name].values.reshape(len(data), 1)

tensorflow simple logistic regression
By : Mick Fabar
Date : March 29 2020, 07:55 AM
This will be helpful for those in need. The function tf.nn.softmax expects the number of logits (last dimension) to be equal to the number of classes (2 in your case, {1, 0}). Since the last dimension in your case is 1, softmax will always return 1 (the probability of being in the only available class is always 1 since no other class exists). Therefore h is a tensor filled with 1's and tf.log(1 - h) will return negative infinity. Infinity multiplied by zero (1 - y_i in some rows) returns NaN. You should replace tf.nn.softmax with tf.nn.sigmoid. code :
# Option 1: explicit sigmoid + hand-written binary cross-entropy.
h = tf.nn.sigmoid(tf.matmul(x_i, W) + b)
# Negative mean log-likelihood: -(1/m) * sum(y*log(h) + (1-y)*log(1-h))
cost = -tf.reduce_sum(tf.add(tf.multiply(y_i, tf.log(h)),
                             tf.multiply(1 - y_i, tf.log(1 - h)))) / m
# Option 2 (numerically stable): keep raw logits and let TensorFlow
# fuse the sigmoid with the cross-entropy.
h = tf.matmul(x_i, W) + b
cost = tf.reduce_mean(tf.sigmoid_cross_entropy_with_logits(labels=y_i, logits=h))

Spark: Extracting summary for a ML logistic regression model from a pipeline model
By : Nishchay Khanna
Date : March 29 2020, 07:55 AM

Scikitlearn's logistic regression is performing poorer than selfwritten logistic regression in Python
By : randallcp
Date : March 29 2020, 07:55 AM



Related Posts :
