
Lab 2

include("./main.jl") 
using Plots  

Loading Dataset

train, test = read_data("6.txt")
# Normalization is left disabled here; uncomment to test its effect.
# my_normalize!(train)
# my_normalize!(test)
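my_normalize! is defined in main.jl, which is not shown here. A minimal sketch, assuming it does in-place min-max scaling of each feature column of a matrix-shaped dataset (the name my_normalize_sketch! and the data layout are assumptions):

function my_normalize_sketch!(data::AbstractMatrix)
    # Hypothetical stand-in for my_normalize! from main.jl:
    # rescale each column to [0, 1] in place.
    for j in axes(data, 2)
        lo, hi = extrema(@view data[:, j])
        if hi > lo  # skip constant columns to avoid division by zero
            @views data[:, j] .= (data[:, j] .- lo) ./ (hi - lo)
        end
    end
    return data
end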

Finding the best hyperparameters

Least Squares

res = []
loss = mk_loss(:SMAPE)
# Compare plain least squares against ridge; the ridge coefficient τ
# is drawn at random from the integers -2:20.
for regularization in [:no, :ridge]
    if regularization == :no
        m = mk_model(
            :LeastSquares,
            mk_least_square_regularization(:no),
        )
        w = compute(m, train)
        push!(res, (calc_loss(loss, w, test), regularization))
    elseif regularization == :ridge
        for _ in 1:10
            τ = rand(-2:20)
            m = mk_model(
                :LeastSquares,
                mk_least_square_regularization(:ridge, τ),
            )
            w = compute(m, train)
            push!(res, (calc_loss(loss, w, test), (regularization, τ)))
        end
    end
end
find_best(res)
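find_best also lives in main.jl. A plausible one-liner, assuming res holds (loss, params) tuples and smaller loss is better (the name find_best_sketch is an assumption):

# Hypothetical stand-in: return the (loss, params) entry with minimal loss.
find_best_sketch(res) = res[argmin(first.(res))]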

SGD with MSE

Random-search log over SGD hyperparameters; each Params tuple is (steps, batch size, step-change mode, μ, μ_param, τ), in the order destructured later in this notebook.

Params: (800, 100, :factor, 0.3, 0.2, 0.00033546262790251185)
Loss: 0.9999999995036284
======================
Params: (500, 150, :factor, 1.0, 0.0, 148.4131591025766)
Loss: 0.9915697595386919
======================
Params: (700, 100, :factor, 0.3, 0.9, 0.01831563888873418)
Loss: 1.0
======================
Params: (1000, 200, :iter, 0.0, 10, 20.085536923187668)
grad norm is NaN
Loss: NaN
======================
Params: (1000, 50, :factor, 0.3, 0.0, 0.0024787521766663585)
Loss: 0.9764951783997626
======================
Params: (700, 200, :factor, 0.1, 0.2, 0.36787944117144233)
Loss: 0.999999999148384
======================
Params: (800, 50, :factor, 1.0, 0.7, 1096.6331584284585)
Loss: 1.0
======================
Params: (900, 50, :factor, 0.1, 0.8, 0.0009118819655545162)
Loss: 1.0
======================
Params: (800, 100, :factor, 0.0, 0.4, 2980.9579870417283)
grad norm is NaN
Loss: NaN
======================
Params: (700, 100, :factor, 0.5, 0.3, 403.4287934927351)
Loss: 0.38611595219123984
======================
Params: (500, 150, :iter, 0.1, 3, 0.36787944117144233)
Loss: 1.0
======================
(0.38611595219123984, (700, 100, :factor, 0.5, 0.3, 403.4287934927351))
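The loop that produced this log is not shown in the source. A minimal sketch under assumptions read off the log itself: τ looks like exp of an integer in roughly -8:8, μ_param is a factor in 0:0.1:0.9 for :factor schedules and an integer in 1:10 for :iter schedules, and the regularizer is assumed to be :lasso as in the final model.

res = []
loss = mk_loss(:MSE)
for _ in 1:11
    # Hypothetical sampling ranges, inferred from the log above.
    steps   = rand(500:100:1000)      # iteration budget
    b       = rand(50:50:200)         # mini-batch size
    step_ch = rand([:factor, :iter])  # learning-rate schedule kind
    μ       = rand(0.0:0.1:1.0)       # initial step size
    μ_param = step_ch == :factor ? rand(0.0:0.1:0.9) : rand(1:10)
    τ       = exp(rand(-8:8))         # regularization coefficient
    m = mk_model(
        :SGD,
        mk_sgd_loss_with_regularization(
            mk_loss(:MSE),
            mk_sgd_regularization(:lasso, τ),
        ),
        mk_sgd_init(:cool),
        steps,
        b,
        μ,
        mk_sgd_step_change(step_ch, μ_param),
        0.2,
    )
    w = compute(m, train)
    push!(res, (calc_loss(loss, w, test), (steps, b, step_ch, μ, μ_param, τ)))
end
find_best(res)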

SGD with SMAPE

Params: (600, 100, :iter, 0.7, 2, 54.598150033144236)
Loss: 0.8758378580226013
======================
Params: (500, 150, :factor, 0.8, 0.8, 8103.083927575384)
grad norm is NaN
Loss: NaN
======================
Params: (900, 200, :iter, 0.3, 1, 20.085536923187668)
Loss: 0.7635889654554721
======================
Params: (800, 100, :iter, 0.7, 5, 2.718281828459045)
Loss: 0.40869340073772736
======================
Params: (700, 150, :factor, 0.5, 0.7, 1096.6331584284585)
Loss: 1.0
======================
Params: (900, 150, :factor, 0.4, 0.1, 7.38905609893065)
Loss: 0.6009945752977576
======================
Params: (700, 200, :iter, 0.9, 10, 20.085536923187668)
Loss: 0.763962682892323
======================
Params: (900, 200, :factor, 0.9, 0.5, 2980.9579870417283)
grad norm is NaN
Loss: NaN
======================
Params: (800, 200, :factor, 0.8, 0.9, 0.00033546262790251185)
Loss: 0.38934112557822426
======================
Params: (600, 50, :factor, 0.6, 0.9, 0.00012340980408667956)
Loss: 0.015040756667538078
======================
Params: (600, 100, :factor, 0.6, 0.6, 148.4131591025766)
Loss: 0.9386416874689738
======================
Params: (500, 200, :iter, 0.0, 2, 0.36787944117144233)
Loss: 0.03799896290356693
======================
Params: (600, 100, :factor, 0.4, 0.3, 403.4287934927351)
Loss: 0.9697738974580693
======================
Params: (800, 200, :factor, 0.4, 0.2, 0.1353352832366127)
Loss: 0.034284464536347814
======================
Params: (700, 200, :factor, 0.6, 0.3, 54.598150033144236)
Loss: 0.8736914022663183
======================
Params: (800, 100, :iter, 0.7, 7, 20.085536923187668)
Loss: 0.7657354885754014
======================
Params: (1000, 50, :factor, 0.5, 0.7, 0.0024787521766663585)
Loss: 0.02895688488481395
======================
Params: (900, 150, :iter, 1.0, 1, 4.5399929762484854e-5)
Loss: 0.9986345948305239
======================
Params: (600, 150, :factor, 0.6, 0.2, 0.0009118819655545162)
Loss: 0.9926736985018997
======================
Params: (700, 150, :factor, 0.6, 0.3, 0.049787068367863944)
Loss: 0.026271462769225422
======================
(0.015040756667538078, (600, 50, :factor, 0.6, 0.9, 0.00012340980408667956))
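For reference, a minimal sketch of the SMAPE loss that mk_loss(:SMAPE) presumably constructs; main.jl's exact normalization may differ, but this variant is bounded by 1, which matches the losses logged above (the name smape_sketch is an assumption):

# Hypothetical SMAPE: mean of |ŷ - y| / (|ŷ| + |y|), bounded by 1.
# Undefined where ŷ and y are both exactly zero.
smape_sketch(ŷ, y) = sum(abs.(ŷ .- y) ./ (abs.(ŷ) .+ abs.(y))) / length(y)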

Least Squares

# Sweep the ridge coefficient on a log grid: the model receives exp(τ),
# so the x-axis below is the natural log of the actual coefficient.
loss = mk_loss(:SMAPE)
ys = []
xs = []
for τ in -1:15
    m = mk_model(
        :LeastSquares,
        mk_least_square_regularization(:ridge, exp(τ)),
    )
    w = compute(m, train)
    push!(xs, τ)
    push!(ys, calc_loss(loss, w, test))
end
plot(xs, ys)

# The same sweep evaluated with MSE instead of SMAPE.
loss = mk_loss(:MSE)
ys = []
xs = []
for τ in -1:15
    m = mk_model(
        :LeastSquares,
        mk_least_square_regularization(:ridge, exp(τ)),
    )
    w = compute(m, train)
    push!(xs, τ)
    push!(ys, calc_loss(loss, w, test))
end
plot(xs, ys)
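For reference, the closed-form solution that :LeastSquares with ridge regularization presumably computes, sketched with a hypothetical design matrix X and target vector y (main.jl's compute may use an SVD or another factorization instead):

using LinearAlgebra

# Hypothetical ridge solution: w = (XᵀX + τI)⁻¹ Xᵀy,
# where X is n×d, y is an n-vector, and τ ≥ 0 is the ridge coefficient.
ridge_solve_sketch(X, y, τ) = (X' * X + τ * I) \ (X' * y)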

SMAPE

# Retrain the best SMAPE configuration found above with a larger
# step budget, logging the loss after every step.
steps, b, step_ch, μ, μ_param, τ = (2000, 50, :factor, 0.6, 0.9, 0.00012340980408667956)
m = mk_model(
    :SGD,
    mk_sgd_loss_with_regularization(
        mk_loss(:SMAPE),
        mk_sgd_regularization(:lasso, τ),
    ),
    mk_sgd_init(:cool),
    steps,
    b,
    μ,
    mk_sgd_step_change(step_ch, μ_param),  # was hard-coded :iter; step_ch is :factor, matching the best run
    0.2,
)
loss_log = []  # renamed from `log` to avoid shadowing Base.log
w = compute(m, train; log=loss_log)
plot(loss_log)
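What a single SGD step with lasso regularization presumably looks like inside compute, sketched for one mini-batch (the name, and the handling of the schedule and of the model's final 0.2 argument, are assumptions):

# Hypothetical single SGD update: w ← w − μ(∇L + τ·sign(w)),
# where τ·sign(w) is the subgradient of the lasso term τ‖w‖₁.
function sgd_step_sketch!(w, grad_loss, μ, τ)
    w .-= μ .* (grad_loss .+ τ .* sign.(w))
    return w
end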

Results

# Fit the model on the training set and report SMAPE and NRMSE on the test set.
function show_res(m, train, test)
    w = compute(m, train)
    smape = mk_loss(:SMAPE)
    nrmse = mk_loss(:NRMSE)
    smape_val = calc_loss(smape, w, test)
    nrmse_val = calc_loss(nrmse, w, test)
    println("SMAPE: ", smape_val)
    println("NRMSE: ", nrmse_val)
end
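A minimal sketch of the NRMSE that mk_loss(:NRMSE) presumably builds; normalizing by the target range is one common convention, though main.jl may divide by the mean or standard deviation instead (the name nrmse_sketch is an assumption):

# Hypothetical NRMSE: RMSE divided by the range of the targets.
nrmse_sketch(ŷ, y) = sqrt(sum((ŷ .- y) .^ 2) / length(y)) / (maximum(y) - minimum(y))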

Least Squares

# Best ridge setting found above. Note the sweep earlier passed exp(τ) to the
# model while this call passes τ = 12 directly; if the sweep's convention was
# intended, the coefficient here should be exp(12) instead.
regularization, τ = (:ridge, 12)
m = mk_model(
    :LeastSquares,
    mk_least_square_regularization(regularization, τ),
)
show_res(m, train, test)

SGD with SMAPE

steps, b, step_ch, μ, μ_param, τ = (2000, 50, :factor, 0.6, 0.9, 0.00012340980408667956)
m = mk_model(
    :SGD,
    mk_sgd_loss_with_regularization(
        mk_loss(:SMAPE),
        mk_sgd_regularization(:lasso, τ),
    ),
    mk_sgd_init(:cool),
    steps,
    b,
    μ,
    mk_sgd_step_change(step_ch, μ_param),  # use the :factor schedule from the tuple, as above
    0.2,
)
show_res(m, train, test)