Forums › ProRealTime English forum › ProBuilder support › Neural networks programming with prorealtime › Reply To: Neural networks programming with prorealtime
09/09/2018 at 10:31 PM
#80054
Hi all,
Here is another version of the neural network; I improved the back-propagation loop a bit.
I also changed the inputs (they can be whatever you want, as long as ETA is calibrated accordingly).
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 |
// ============ Hyperparameters to be optimized (expected as optimization variables) ============
// ETA=0.05            // known as the learning rate
//candlesback=7        // for the classifier: how many bars back to scan for a trade setup
//ProfitRiskRatio=2    // for the classifier: reward-to-risk ratio a "winning" trade must reach
//spread=0.9           // for the classifier: broker spread, in pips
//P1=20                // FOR CURVE AS INPUT (fast moving-average period)
//P1=200               // FOR CURVE AS INPUT -- NOTE(review): presumably meant P2=200; confirm variable name
///////////////// CLASSIFIER /////////////
// Labels past bars: would a trade entered 'scan' bars ago have reached
// ProfitRiskRatio times its risk before its stop-loss was hit?
myATR=average[20](range)+std[20](range) // volatility buffer: mean range + 1 std dev over 20 bars
ExtraStopLoss=MyATR                     // stop distance beyond the entry candle's extreme
//ExtraStopLoss=3*spread*pipsize        // alternative: fixed spread-based stop distance

//for long trades
classifierlong=0
FOR scanL=1 to candlesback DO
  IF classifierlong[scanL]=1 then
    // this entry candle was already labelled a winner on an earlier bar
    BREAK
  ENDIF
  // risk = close - (low - stop buffer); target distance = ProfitRiskRatio * risk
  LongTradeLength=ProfitRiskRatio*(close[scanL]-(low[scanL]-ExtraStopLoss[scanL]))
  IF close[scanL]+LongTradeLength < high-spread*pipsize then
    // target has been reached by the current bar (net of spread)
    IF lowest[scanL+1](low) > low[scanL]-ExtraStopLoss[scanL]+spread*pipsize then
      // stop level was never touched between entry and now
      classifierlong=1                  // label: winning long setup
      candleentrylong=barindex-scanL    // absolute bar index of the entry candle
      BREAK
    ENDIF
  ENDIF
NEXT

//for short trades (mirror of the long scan)
classifiershort=0
FOR scanS=1 to candlesback DO
  IF classifiershort[scanS]=1 then
    BREAK
  ENDIF
  ShortTradeLength=ProfitRiskRatio*((high[scanS]-close[scanS])+ExtraStopLoss[scanS])
  IF close[scanS]-ShortTradeLength > low+spread*pipsize then
    IF highest[scanS+1](high) < high[scanS]+ExtraStopLoss[scanS]-spread*pipsize then
      classifiershort=1
      candleentryshort=barindex-scanS
      BREAK
    ENDIF
  ENDIF
NEXT

///////////////////////// NEURONAL NETWORK ///////////////////
// ...INITIAL VALUES...
// a[i][j]: weight from input j to hidden neuron i (6 hidden neurons x 4 inputs).
// NOTE(review): all weights start at 1 and all biases at 0 — a symmetric start;
// hidden neurons only differentiate through training. Random init would be usual.
once a11=1
once a12=1
once a13=1
once a14=1
once a21=1
once a22=1
once a23=1
once a24=1
once a31=1
once a32=1
once a33=1
once a34=1
once a41=1
once a42=1
once a43=1
once a44=1
once a51=1
once a52=1
once a53=1
once a54=1
once a61=1
once a62=1
once a63=1
once a64=1
// hidden-layer biases
once Fbias1=0
once Fbias2=0
once Fbias3=0
once Fbias4=0
once Fbias5=0
once Fbias6=0
// b[o][i]: weight from hidden neuron i to output o (output1=long, output2=short)
once b11=1
once b12=1
once b13=1
once b14=1
once b15=1
once b16=1
once b21=1
once b22=1
once b23=1
once b24=1
once b25=1
once b26=1
// output-layer biases
once Obias1=0
once Obias2=0
// ...DEFINITION OF INPUTS...
//ANGLE DEFINITION
// Slope of each moving average in degrees, smoothed with a weighted average
// over roughly sqrt(period/2) bars.
ONCE PANGLE1=ROUND(SQRT(P1/2))
CURVE1=AVERAGE[P1](CLOSE)
ANGLE1=ATAN(CURVE1-CURVE1[1])*180/3.1416
ANGLEAVERAGE1=WeightedAverage[PANGLE1](ANGLE1)
ONCE PANGLE2=ROUND(SQRT(P2/2))
CURVE2=AVERAGE[P2](CLOSE)
ANGLE2=ATAN(CURVE2-CURVE2[1])*180/3.1416
ANGLEAVERAGE2=WeightedAverage[PANGLE2](ANGLE2)

// Network inputs: anything works, as long as ETA is calibrated to their scale.
variable1= (close-CURVE1)/CURVE1 *100 // % distance of price from the fast MA
variable2= (CURVE1-CURVE2)/CURVE2 *100 // % distance of the fast MA from the slow MA
variable3= ANGLEAVERAGE1 // smoothed slope of the fast MA
variable4= ANGLEAVERAGE2 // smoothed slope of the slow MA

// >>> LEARNING PROCESS <<<
// If the classifier has detected a winning trade in the past
//IF hour > 7 and hour < 21 then

// STORING THE LEARNING DATA: a ten-sample shift register of
// (entry bar index, long label Y1, short label Y2); newest sample in slot 0001.
IF classifierlong=1 or classifiershort=1 THEN
  candleentry0010=candleentry0009
  Y10010=Y10009
  Y20010=Y20009
  candleentry0009=candleentry0008
  Y10009=Y10008
  Y20009=Y20008
  candleentry0008=candleentry0007
  Y10008=Y10007
  Y20008=Y20007
  candleentry0007=candleentry0006
  Y10007=Y10006
  Y20007=Y20006
  candleentry0006=candleentry0005
  Y10006=Y10005
  Y20006=Y20005
  candleentry0005=candleentry0004
  Y10005=Y10004
  Y20005=Y20004
  candleentry0004=candleentry0003
  Y10004=Y10003
  Y20004=Y20003
  candleentry0003=candleentry0002
  Y10003=Y10002
  Y20003=Y20002
  candleentry0002=candleentry0001
  Y10002=Y10001
  Y20002=Y20001
  candleentry0001=max(candleentrylong,candleentryshort)
  Y10001=classifierlong
  Y20001=classifiershort
ENDIF

IF BARINDEX > 1000 THEN // wait for the averages and the sample buffer to warm up
  IF classifierlong=1 or classifiershort=1 THEN
    IF hour > 8 and hour < 21 then
      // Replay the 10 stored samples, oldest first; the learning rate ramps
      // from 0.37*ETA (oldest sample) up to ETA (newest sample).
      FOR i=1 to 10 DO // THERE ARE BETTER IDEAS
        ETAi=ETA*(0.7*i/10+0.3) //Learning Rate for this sample
        // Select the i-th sample (i=1 -> oldest slot 0010, i=10 -> newest slot 0001)
        IF i = 1 THEN
          candleentry=candleentry0010
          Y1=Y10010
          Y2=Y20010
        ENDIF
        IF i = 2 THEN
          candleentry=candleentry0009
          Y1=Y10009
          Y2=Y20009
        ENDIF
        IF i = 3 THEN
          candleentry=candleentry0008
          Y1=Y10008
          Y2=Y20008
        ENDIF
        IF i = 4 THEN
          candleentry=candleentry0007
          Y1=Y10007
          Y2=Y20007
        ENDIF
        IF i = 5 THEN
          candleentry=candleentry0006
          Y1=Y10006
          Y2=Y20006
        ENDIF
        IF i = 6 THEN
          candleentry=candleentry0005
          Y1=Y10005
          Y2=Y20005
        ENDIF
        IF i = 7 THEN
          candleentry=candleentry0004
          Y1=Y10004
          Y2=Y20004
        ENDIF
        IF i = 8 THEN
          candleentry=candleentry0003
          Y1=Y10003
          Y2=Y20003
        ENDIF
        IF i = 9 THEN
          candleentry=candleentry0002
          Y1=Y10002
          Y2=Y20002
        ENDIF
        IF i = 10 THEN
          candleentry=candleentry0001
          Y1=Y10001
          Y2=Y20001
        ENDIF

        // >>> INPUT FOR NEURONS <<< (features as they were on the entry candle)
        input1=variable1[barindex-candleentry]
        input2=variable2[barindex-candleentry]
        input3=variable3[barindex-candleentry]
        input4=variable4[barindex-candleentry]

        // >>> FIRST LAYER OF NEURONS <<< (weighted sum then sigmoid)
        F1=a11*input1+a12*input2+a13*input3+a14*input4+Fbias1
        F2=a21*input1+a22*input2+a23*input3+a24*input4+Fbias2
        F3=a31*input1+a32*input2+a33*input3+a34*input4+Fbias3
        F4=a41*input1+a42*input2+a43*input3+a44*input4+Fbias4
        F5=a51*input1+a52*input2+a53*input3+a54*input4+Fbias5
        F6=a61*input1+a62*input2+a63*input3+a64*input4+Fbias6
        F1=1/(1+EXP(-1*F1))
        F2=1/(1+EXP(-1*F2))
        F3=1/(1+EXP(-1*F3))
        F4=1/(1+EXP(-1*F4))
        F5=1/(1+EXP(-1*F5))
        F6=1/(1+EXP(-1*F6))

        // >>> OUTPUT NEURONS <<<
        output1=b11*F1+b12*F2+b13*F3+b14*F4+b15*F5+b16*F6+Obias1
        output2=b21*F1+b22*F2+b23*F3+b24*F4+b25*F5+b26*F6+Obias2
        output1=1/(1+EXP(-1*output1))
        output2=1/(1+EXP(-1*output2))

        // >>> PARTIAL DERIVATIVES OF COST FUNCTION <<<
        // ... CROSS-ENTROPY AS COST FUNCTION ...
        // COST = -( Y1*LOG(output1)+(1-Y1)*LOG(1-output1) ) - ( Y2*LOG(output2)+(1-Y2)*LOG(1-output2) )
        // For sigmoid + cross-entropy, dCOST/dz = (output - Y).
        DerObias1 = (output1-Y1) * 1
        DerObias2 = (output2-Y2) * 1
        Derb11 = (output1-Y1) * F1
        Derb12 = (output1-Y1) * F2
        Derb13 = (output1-Y1) * F3
        Derb14 = (output1-Y1) * F4
        Derb15 = (output1-Y1) * F5
        Derb16 = (output1-Y1) * F6
        Derb21 = (output2-Y2) * F1
        Derb22 = (output2-Y2) * F2
        Derb23 = (output2-Y2) * F3
        Derb24 = (output2-Y2) * F4
        Derb25 = (output2-Y2) * F5
        Derb26 = (output2-Y2) * F6

        // >>> PARTIAL DERIVATIVES OF COST FUNCTION (HIDDEN LAYER) <<<
        // BUGFIX: computed BEFORE the output-layer weights are stepped, so the
        // hidden-layer gradients use the same weights as the forward pass
        // (the original updated b* first, then derived DerFbias*/Dera* from the
        // already-updated weights).
        DerFbias1 = (output1-Y1) * b11 * F1*(1-F1) * 1 + (output2-Y2) * b21 * F1*(1-F1) * 1
        DerFbias2 = (output1-Y1) * b12 * F2*(1-F2) * 1 + (output2-Y2) * b22 * F2*(1-F2) * 1
        DerFbias3 = (output1-Y1) * b13 * F3*(1-F3) * 1 + (output2-Y2) * b23 * F3*(1-F3) * 1
        DerFbias4 = (output1-Y1) * b14 * F4*(1-F4) * 1 + (output2-Y2) * b24 * F4*(1-F4) * 1
        DerFbias5 = (output1-Y1) * b15 * F5*(1-F5) * 1 + (output2-Y2) * b25 * F5*(1-F5) * 1
        DerFbias6 = (output1-Y1) * b16 * F6*(1-F6) * 1 + (output2-Y2) * b26 * F6*(1-F6) * 1
        Dera11 = (output1-Y1) * b11 * F1*(1-F1) * input1 + (output2-Y2) * b21 * F1*(1-F1) * input1
        Dera12 = (output1-Y1) * b11 * F1*(1-F1) * input2 + (output2-Y2) * b21 * F1*(1-F1) * input2
        Dera13 = (output1-Y1) * b11 * F1*(1-F1) * input3 + (output2-Y2) * b21 * F1*(1-F1) * input3
        Dera14 = (output1-Y1) * b11 * F1*(1-F1) * input4 + (output2-Y2) * b21 * F1*(1-F1) * input4
        Dera21 = (output1-Y1) * b12 * F2*(1-F2) * input1 + (output2-Y2) * b22 * F2*(1-F2) * input1
        Dera22 = (output1-Y1) * b12 * F2*(1-F2) * input2 + (output2-Y2) * b22 * F2*(1-F2) * input2
        Dera23 = (output1-Y1) * b12 * F2*(1-F2) * input3 + (output2-Y2) * b22 * F2*(1-F2) * input3
        Dera24 = (output1-Y1) * b12 * F2*(1-F2) * input4 + (output2-Y2) * b22 * F2*(1-F2) * input4
        Dera31 = (output1-Y1) * b13 * F3*(1-F3) * input1 + (output2-Y2) * b23 * F3*(1-F3) * input1
        Dera32 = (output1-Y1) * b13 * F3*(1-F3) * input2 + (output2-Y2) * b23 * F3*(1-F3) * input2
        Dera33 = (output1-Y1) * b13 * F3*(1-F3) * input3 + (output2-Y2) * b23 * F3*(1-F3) * input3
        Dera34 = (output1-Y1) * b13 * F3*(1-F3) * input4 + (output2-Y2) * b23 * F3*(1-F3) * input4
        Dera41 = (output1-Y1) * b14 * F4*(1-F4) * input1 + (output2-Y2) * b24 * F4*(1-F4) * input1
        Dera42 = (output1-Y1) * b14 * F4*(1-F4) * input2 + (output2-Y2) * b24 * F4*(1-F4) * input2
        Dera43 = (output1-Y1) * b14 * F4*(1-F4) * input3 + (output2-Y2) * b24 * F4*(1-F4) * input3
        Dera44 = (output1-Y1) * b14 * F4*(1-F4) * input4 + (output2-Y2) * b24 * F4*(1-F4) * input4
        Dera51 = (output1-Y1) * b15 * F5*(1-F5) * input1 + (output2-Y2) * b25 * F5*(1-F5) * input1
        Dera52 = (output1-Y1) * b15 * F5*(1-F5) * input2 + (output2-Y2) * b25 * F5*(1-F5) * input2
        Dera53 = (output1-Y1) * b15 * F5*(1-F5) * input3 + (output2-Y2) * b25 * F5*(1-F5) * input3
        Dera54 = (output1-Y1) * b15 * F5*(1-F5) * input4 + (output2-Y2) * b25 * F5*(1-F5) * input4
        Dera61 = (output1-Y1) * b16 * F6*(1-F6) * input1 + (output2-Y2) * b26 * F6*(1-F6) * input1
        Dera62 = (output1-Y1) * b16 * F6*(1-F6) * input2 + (output2-Y2) * b26 * F6*(1-F6) * input2
        Dera63 = (output1-Y1) * b16 * F6*(1-F6) * input3 + (output2-Y2) * b26 * F6*(1-F6) * input3
        Dera64 = (output1-Y1) * b16 * F6*(1-F6) * input4 + (output2-Y2) * b26 * F6*(1-F6) * input4

        //Implementing BackPropagation (gradient-descent step on all parameters)
        // BUGFIX: the original updated b13..b16 and b21..b26 from b11/b12
        // (e.g. "b13=b11-ETAi*Derb13"), overwriting every output-layer weight
        // with a copy of b11/b12 on each step. Each weight now descends from
        // its own previous value.
        Obias1=Obias1-ETAi*DerObias1
        Obias2=Obias2-ETAi*DerObias2
        b11=b11-ETAi*Derb11
        b12=b12-ETAi*Derb12
        b13=b13-ETAi*Derb13
        b14=b14-ETAi*Derb14
        b15=b15-ETAi*Derb15
        b16=b16-ETAi*Derb16
        b21=b21-ETAi*Derb21
        b22=b22-ETAi*Derb22
        b23=b23-ETAi*Derb23
        b24=b24-ETAi*Derb24
        b25=b25-ETAi*Derb25
        b26=b26-ETAi*Derb26
        Fbias1=Fbias1-ETAi*DerFbias1
        Fbias2=Fbias2-ETAi*DerFbias2
        Fbias3=Fbias3-ETAi*DerFbias3
        Fbias4=Fbias4-ETAi*DerFbias4
        Fbias5=Fbias5-ETAi*DerFbias5
        Fbias6=Fbias6-ETAi*DerFbias6
        a11=a11-ETAi*Dera11
        a12=a12-ETAi*Dera12
        a13=a13-ETAi*Dera13
        a14=a14-ETAi*Dera14
        a21=a21-ETAi*Dera21
        a22=a22-ETAi*Dera22
        a23=a23-ETAi*Dera23
        a24=a24-ETAi*Dera24
        a31=a31-ETAi*Dera31
        a32=a32-ETAi*Dera32
        a33=a33-ETAi*Dera33
        a34=a34-ETAi*Dera34
        a41=a41-ETAi*Dera41
        a42=a42-ETAi*Dera42
        a43=a43-ETAi*Dera43
        a44=a44-ETAi*Dera44
        a51=a51-ETAi*Dera51
        a52=a52-ETAi*Dera52
        a53=a53-ETAi*Dera53
        a54=a54-ETAi*Dera54
        a61=a61-ETAi*Dera61
        a62=a62-ETAi*Dera62
        a63=a63-ETAi*Dera63
        a64=a64-ETAi*Dera64

        // Optional convergence diagnostic (typos in the original fixed:
        // DerFbias3+DerFbias3 -> DerFbias3*DerFbias3, DerFbias4*DerFbias5 -> DerFbias5*DerFbias5)
        //GradientNorm = SQRT(DerObias1*DerObias1+DerObias2*DerObias2 + Derb11*Derb11+Derb12*Derb12+Derb13*Derb13+Derb14*Derb14+Derb15*Derb15+Derb16*Derb16 + Derb21*Derb21+Derb22*Derb22+Derb23*Derb23+Derb24*Derb24+Derb25*Derb25+Derb26*Derb26 + DerFbias1*DerFbias1+DerFbias2*DerFbias2+DerFbias3*DerFbias3+DerFbias4*DerFbias4+DerFbias5*DerFbias5+DerFbias6*DerFbias6 + Dera11*Dera11+Dera12*Dera12+Dera13*Dera13+Dera14*Dera14 + Dera21*Dera21+Dera22*Dera22+Dera23*Dera23+Dera24*Dera24 + Dera31*Dera31+Dera32*Dera32+Dera33*Dera33+Dera34*Dera34 + Dera41*Dera41+Dera42*Dera42+Dera43*Dera43+Dera44*Dera44 + Dera51*Dera51+Dera52*Dera52+Dera53*Dera53+Dera54*Dera54 + Dera61*Dera61+Dera62*Dera62+Dera63*Dera63+Dera64*Dera64)
      NEXT
    ENDIF
  ENDIF
  //ENDIF

  /////////////////// NEW PREDICTION ///////////////////
  // Forward pass on the current bar's features with the freshly trained weights.
  // >>> INPUT NEURONS <<<
  input1=variable1
  input2=variable2
  input3=variable3
  input4=variable4
  // >>> FIRST LAYER OF NEURONS <<<
  F1=a11*input1+a12*input2+a13*input3+a14*input4+Fbias1
  F2=a21*input1+a22*input2+a23*input3+a24*input4+Fbias2
  F3=a31*input1+a32*input2+a33*input3+a34*input4+Fbias3
  F4=a41*input1+a42*input2+a43*input3+a44*input4+Fbias4
  F5=a51*input1+a52*input2+a53*input3+a54*input4+Fbias5
  F6=a61*input1+a62*input2+a63*input3+a64*input4+Fbias6
  F1=1/(1+EXP(-1*F1))
  F2=1/(1+EXP(-1*F2))
  F3=1/(1+EXP(-1*F3))
  F4=1/(1+EXP(-1*F4))
  F5=1/(1+EXP(-1*F5))
  F6=1/(1+EXP(-1*F6))
  // >>> OUTPUT NEURONS <<< (sigmoid outputs in [0,1]: estimated win probabilities)
  output1=b11*F1+b12*F2+b13*F3+b14*F4+b15*F5+b16*F6+Obias1
  output2=b21*F1+b22*F2+b23*F3+b24*F4+b25*F5+b26*F6+Obias2
  output1=1/(1+EXP(-1*output1))
  output2=1/(1+EXP(-1*output2))
ENDIF

return output1 coloured(0,150,0) style(line,2) as "prediction long" , output2 coloured(200,0,0) style(line,2) as "prediction short", 0.5 coloured(0,0,200) as "0.5", 0.6 coloured(0,0,200) as "0.6", 0.7 coloured(0,0,200) as "0.7", 0.8 coloured(0,0,200) as "0.8"