Skip to content

Commit 94be17f

Browse files
committed
Merge branch 'release/6.1.3'
2 parents dad82b4 + 459b968 commit 94be17f

40 files changed

+383
-539
lines changed

NEWS

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,17 @@
1+
2017-12-05 Viktor Gal <[email protected]>
2+
3+
* SHOGUN Release version 6.1.3 (libshogun 18.0, data 0.11, parameter 1)
4+
5+
* Features:
6+
- Drop all <math.h> function calls [Viktor Gal]
7+
- Use c++11 std::isnan, std::isfinite, std::isinf [Viktor Gal]
8+
* Bugfixes:
9+
- Port ipython notebooks to be python3 compatible [Viktor Gal]
10+
- Use the shogun-static library on Windows when linking the interface library [Viktor Gal]
11+
- Fix python typemap when compiling with MSVC [Viktor Gal]
12+
- Fix ShogunConfig.cmake paths [Viktor Gal]
13+
- Fix meta example parser bug in parallel builds [Esben Sørig]
14+
115
2017-11-29 Viktor Gal <[email protected]>
216

317
* SHOGUN Release version 6.1.2 (libshogun 18.0, data 0.11, parameter 1)

cmake/ShogunInterfaces.cmake

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@ SET(INTERFACE_TARGET interface_${INTERFACE_NAME})
3333
SET(INTERFACE_TARGET_SRC ${INTERFACE_TARGET}_src)
3434

3535
ADD_CUSTOM_TARGET(${INTERFACE_TARGET_SRC}
36-
DEPENDS shogun::shogun ${INTERFACE_FILES}
36+
DEPENDS ${INTERFACE_FILES}
3737
COMMENT "copying SWIG files")
3838

3939
INCLUDE(${SWIG_USE_FILE})
@@ -43,8 +43,11 @@ IF(DEFINED TARGET_SWIGFLAGS)
4343
ENDIF()
4444
SET(SWIG_MODULE_${INTERFACE_NAME}_EXTRA_DEPS ${INTERFACE_FILES})
4545
SWIG_ADD_MODULE(${INTERFACE_TARGET} ${INTERFACE_NAME} shogun.i sg_print_functions.cpp)
46-
SWIG_LINK_LIBRARIES(${INTERFACE_TARGET} shogun::shogun ${INTERFACE_LIBRARIES})
47-
46+
IF (WIN32)
47+
SWIG_LINK_LIBRARIES(${INTERFACE_TARGET} shogun::shogun-static ${INTERFACE_LIBRARIES})
48+
ELSE ()
49+
SWIG_LINK_LIBRARIES(${INTERFACE_TARGET} shogun::shogun ${INTERFACE_LIBRARIES})
50+
ENDIF ()
4851

4952
#get_cmake_property(_variableNames VARIABLES)
5053
#foreach (_variableName ${_variableNames})

doc/ipython-notebooks/classification/MKL.ipynb

Lines changed: 22 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -171,17 +171,17 @@
171171
"[gmm.set_nth_mean(means[i], i) for i in range(num_components)]\n",
172172
"[gmm.set_nth_cov(covs,i) for i in range(num_components)]\n",
173173
"gmm.set_coef(array([1.0,0.0,0.0,0.0]))\n",
174-
"xntr=array([gmm.sample() for i in xrange(num)]).T\n",
175-
"xnte=array([gmm.sample() for i in xrange(5000)]).T\n",
174+
"xntr=array([gmm.sample() for i in range(num)]).T\n",
175+
"xnte=array([gmm.sample() for i in range(5000)]).T\n",
176176
"gmm.set_coef(array([0.0,1.0,0.0,0.0]))\n",
177-
"xntr1=array([gmm.sample() for i in xrange(num)]).T\n",
178-
"xnte1=array([gmm.sample() for i in xrange(5000)]).T\n",
177+
"xntr1=array([gmm.sample() for i in range(num)]).T\n",
178+
"xnte1=array([gmm.sample() for i in range(5000)]).T\n",
179179
"gmm.set_coef(array([0.0,0.0,1.0,0.0]))\n",
180-
"xptr=array([gmm.sample() for i in xrange(num)]).T\n",
181-
"xpte=array([gmm.sample() for i in xrange(5000)]).T\n",
180+
"xptr=array([gmm.sample() for i in range(num)]).T\n",
181+
"xpte=array([gmm.sample() for i in range(5000)]).T\n",
182182
"gmm.set_coef(array([0.0,0.0,0.0,1.0]))\n",
183-
"xptr1=array([gmm.sample() for i in xrange(num)]).T\n",
184-
"xpte1=array([gmm.sample() for i in xrange(5000)]).T\n",
183+
"xptr1=array([gmm.sample() for i in range(num)]).T\n",
184+
"xpte1=array([gmm.sample() for i in range(5000)]).T\n",
185185
"traindata=concatenate((xntr,xntr1,xptr,xptr1), axis=1)\n",
186186
"trainlab=concatenate((-ones(2*num), ones(2*num)))\n",
187187
"\n",
@@ -269,7 +269,7 @@
269269
"mkl.train() \n",
270270
"\n",
271271
"w=kernel.get_subkernel_weights()\n",
272-
"print w"
272+
"print(w)"
273273
]
274274
},
275275
{
@@ -406,22 +406,22 @@
406406
"out=mkl.apply()\n",
407407
"\n",
408408
"evaluator=ErrorRateMeasure()\n",
409-
"print \"Test error is %2.2f%% :MKL\" % (100*evaluator.evaluate(out,BinaryLabels(testlab)))\n",
409+
"print(\"Test error is %2.2f%% :MKL\" % (100*evaluator.evaluate(out,BinaryLabels(testlab))))\n",
410410
"\n",
411411
"\n",
412412
"comb_ker0t.init(feats_train,RealFeatures(testdata)) \n",
413413
"mkl.set_kernel(comb_ker0t)\n",
414414
"out=mkl.apply()\n",
415415
"\n",
416416
"evaluator=ErrorRateMeasure()\n",
417-
"print \"Test error is %2.2f%% :Subkernel1\"% (100*evaluator.evaluate(out,BinaryLabels(testlab)))\n",
417+
"print(\"Test error is %2.2f%% :Subkernel1\"% (100*evaluator.evaluate(out,BinaryLabels(testlab))))\n",
418418
"\n",
419419
"comb_ker1t.init(feats_train, RealFeatures(testdata))\n",
420420
"mkl.set_kernel(comb_ker1t)\n",
421421
"out=mkl.apply()\n",
422422
"\n",
423423
"evaluator=ErrorRateMeasure()\n",
424-
"print \"Test error is %2.2f%% :subkernel2\" % (100*evaluator.evaluate(out,BinaryLabels(testlab)))\n"
424+
"print(\"Test error is %2.2f%% :subkernel2\" % (100*evaluator.evaluate(out,BinaryLabels(testlab))))\n"
425425
]
426426
},
427427
{
@@ -546,7 +546,7 @@
546546
"\n",
547547
"\n",
548548
"w, mkl=train_mkl(c, feats_tr)\n",
549-
"print w\n",
549+
"print(w)\n",
550550
"out=test_mkl(mkl,grid)\n",
551551
"\n",
552552
"z=out.get_values().reshape((size, size))\n",
@@ -659,8 +659,8 @@
659659
"Nsplit = 2\n",
660660
"all_ks = range(1, 21)\n",
661661
"\n",
662-
"print Xall.shape\n",
663-
"print Xtrain.shape"
662+
"print(Xall.shape)\n",
663+
"print(Xtrain.shape)"
664664
]
665665
},
666666
{
@@ -679,7 +679,7 @@
679679
"outputs": [],
680680
"source": [
681681
"def plot_example(dat, lab):\n",
682-
" for i in xrange(5):\n",
682+
" for i in range(5):\n",
683683
" ax=subplot(1,5,i+1)\n",
684684
" title(int(lab[i]))\n",
685685
" ax.imshow(dat[:,i].reshape((16,16)), interpolation='nearest')\n",
@@ -753,7 +753,7 @@
753753
"out = mkl.apply()\n",
754754
"evaluator = MulticlassAccuracy()\n",
755755
"accuracy = evaluator.evaluate(out, labels_rem)\n",
756-
"print \"Accuracy = %2.2f%%\" % (100*accuracy)\n",
756+
"print(\"Accuracy = %2.2f%%\" % (100*accuracy))\n",
757757
"\n",
758758
"idx=where(out.get_labels() != Yrem)[0]\n",
759759
"Xbad=Xrem[:,idx]\n",
@@ -772,7 +772,7 @@
772772
"outputs": [],
773773
"source": [
774774
"w=kernel.get_subkernel_weights()\n",
775-
"print w"
775+
"print(w)"
776776
]
777777
},
778778
{
@@ -794,7 +794,7 @@
794794
"evaluator = MulticlassAccuracy()\n",
795795
"accuracy = evaluator.evaluate(out, labels_rem)\n",
796796
"\n",
797-
"print \"Accuracy = %2.2f%%\" % (100*accuracy)\n",
797+
"print(\"Accuracy = %2.2f%%\" % (100*accuracy))\n",
798798
"\n",
799799
"idx=np.where(out.get_labels() != Yrem)[0]\n",
800800
"Xbad=Xrem[:,idx]\n",
@@ -825,7 +825,7 @@
825825
"evaluator = MulticlassAccuracy()\n",
826826
"accuracy = evaluator.evaluate(out, labels_rem)\n",
827827
"\n",
828-
"print \"Accuracy = %2.2f%%\" % (100*accuracy)\n",
828+
"print(\"Accuracy = %2.2f%%\" % (100*accuracy))\n",
829829
"\n",
830830
"idx=np.where(out.get_labels() != Yrem)[0]\n",
831831
"Xbad=Xrem[:,idx]\n",
@@ -942,9 +942,9 @@
942942
"outputs": [],
943943
"source": [
944944
"mkl.train()\n",
945-
"print \"Weights:\"\n",
945+
"print(\"Weights:\")\n",
946946
"w=kernel.get_subkernel_weights()\n",
947-
"print w\n",
947+
"print(w)\n",
948948
"\n",
949949
"#initialize with test features\n",
950950
"kernel.init(feats_train, feats_test) \n",

doc/ipython-notebooks/classification/SupportVectorMachines.ipynb

Lines changed: 19 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -194,7 +194,7 @@
194194
"def solve (x1):\n",
195195
" return -( ( (w[0])*x1 + b )/w[1] )\n",
196196
"\n",
197-
"x2=map(solve, x1)\n",
197+
"x2=list(map(solve, x1))\n",
198198
"\n",
199199
"#plot\n",
200200
"plt.figure(figsize=(6,6))\n",
@@ -395,7 +395,7 @@
395395
"libsvm_obj=svm.get_objective()\n",
396396
"primal_obj, dual_obj=svm.compute_svm_primal_objective(), svm.compute_svm_dual_objective()\n",
397397
"\n",
398-
"print libsvm_obj, primal_obj, dual_obj"
398+
"print(libsvm_obj, primal_obj, dual_obj)"
399399
]
400400
},
401401
{
@@ -413,7 +413,7 @@
413413
},
414414
"outputs": [],
415415
"source": [
416-
"print \"duality_gap\", dual_obj-primal_obj"
416+
"print(\"duality_gap\", dual_obj-primal_obj)"
417417
]
418418
},
419419
{
@@ -635,10 +635,10 @@
635635
"gmm.set_nth_cov(np.array([[1.0,0.0],[0.0,1.0]]),1)\n",
636636
"\n",
637637
"gmm.set_coef(np.array([1.0,0.0]))\n",
638-
"xntr=np.array([gmm.sample() for i in xrange(num)]).T\n",
638+
"xntr=np.array([gmm.sample() for i in range(num)]).T\n",
639639
"\n",
640640
"gmm.set_coef(np.array([0.0,1.0]))\n",
641-
"xptr=np.array([gmm.sample() for i in xrange(num)]).T\n",
641+
"xptr=np.array([gmm.sample() for i in range(num)]).T\n",
642642
"\n",
643643
"traindata=np.concatenate((xntr,xptr), axis=1)\n",
644644
"trainlab=np.concatenate((-np.ones(num), np.ones(num)))\n",
@@ -847,7 +847,7 @@
847847
"\n",
848848
"Err=sg.ErrorRateMeasure()\n",
849849
"error=Err.evaluate(output, lab_test)\n",
850-
"print 'Error:', error\n",
850+
"print('Error:', error)\n",
851851
"\n",
852852
"#set normalization\n",
853853
"gaussian_kernel=sg.GaussianKernel()\n",
@@ -863,7 +863,7 @@
863863
"\n",
864864
"Err=sg.ErrorRateMeasure()\n",
865865
"error=Err.evaluate(output, lab_test)\n",
866-
"print 'Error with normalization:', error"
866+
"print('Error with normalization:', error)"
867867
]
868868
},
869869
{
@@ -902,24 +902,24 @@
902902
"[gmm.set_nth_mean(means[i], i) for i in range(num_components)]\n",
903903
"[gmm.set_nth_cov(covs,i) for i in range(num_components)]\n",
904904
"gmm.set_coef(np.array([1.0,0.0,0.0,0.0]))\n",
905-
"xntr=np.array([gmm.sample() for i in xrange(num)]).T\n",
906-
"xnte=np.array([gmm.sample() for i in xrange(5000)]).T\n",
905+
"xntr=np.array([gmm.sample() for i in range(num)]).T\n",
906+
"xnte=np.array([gmm.sample() for i in range(5000)]).T\n",
907907
"gmm.set_coef(np.array([0.0,1.0,0.0,0.0]))\n",
908-
"xntr1=np.array([gmm.sample() for i in xrange(num)]).T\n",
909-
"xnte1=np.array([gmm.sample() for i in xrange(5000)]).T\n",
908+
"xntr1=np.array([gmm.sample() for i in range(num)]).T\n",
909+
"xnte1=np.array([gmm.sample() for i in range(5000)]).T\n",
910910
"gmm.set_coef(np.array([0.0,0.0,1.0,0.0]))\n",
911-
"xptr=np.array([gmm.sample() for i in xrange(num)]).T\n",
912-
"xpte=np.array([gmm.sample() for i in xrange(5000)]).T\n",
911+
"xptr=np.array([gmm.sample() for i in range(num)]).T\n",
912+
"xpte=np.array([gmm.sample() for i in range(5000)]).T\n",
913913
"gmm.set_coef(np.array([0.0,0.0,0.0,1.0]))\n",
914-
"xptr1=np.array([gmm.sample() for i in xrange(num)]).T\n",
915-
"xpte1=np.array([gmm.sample() for i in xrange(5000)]).T\n",
914+
"xptr1=np.array([gmm.sample() for i in range(num)]).T\n",
915+
"xpte1=np.array([gmm.sample() for i in range(5000)]).T\n",
916916
"traindata=np.concatenate((xntr,xntr1,xptr,xptr1), axis=1)\n",
917917
"testdata=np.concatenate((xnte,xnte1,xpte,xpte1), axis=1)\n",
918918
"\n",
919-
"l0 = np.array([0.0 for i in xrange(num)])\n",
920-
"l1 = np.array([1.0 for i in xrange(num)])\n",
921-
"l2 = np.array([2.0 for i in xrange(num)])\n",
922-
"l3 = np.array([3.0 for i in xrange(num)])\n",
919+
"l0 = np.array([0.0 for i in range(num)])\n",
920+
"l1 = np.array([1.0 for i in range(num)])\n",
921+
"l2 = np.array([2.0 for i in range(num)])\n",
922+
"l3 = np.array([3.0 for i in range(num)])\n",
923923
"\n",
924924
"trainlab=np.concatenate((l0,l1,l2,l3))\n",
925925
"testlab=np.concatenate((l0,l1,l2,l3))\n",

doc/ipython-notebooks/clustering/KMeans.ipynb

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -195,7 +195,7 @@
195195
"input": [
196196
"def plotResult(title = 'KMeans Plot'):\n",
197197
" figure,axis = pyplot.subplots(1,1)\n",
198-
" for i in xrange(totalPoints):\n",
198+
" for i in range(totalPoints):\n",
199199
" if result[i]==0.0:\n",
200200
" axis.plot(rectangle[0,i], rectangle[1,i], 'o', color='g', markersize=3)\n",
201201
" else:\n",
@@ -638,7 +638,7 @@
638638
"input": [
639639
"# plot the clusters over the original points in 2 dimensions\n",
640640
"figure,axis = pyplot.subplots(1,1)\n",
641-
"for i in xrange(150):\n",
641+
"for i in range(150):\n",
642642
" if result[i]==0.0:\n",
643643
" axis.plot(obsmatrix[2,i],obsmatrix[3,i],'ko',color='r', markersize=5)\n",
644644
" elif result[i]==1.0:\n",
@@ -707,7 +707,7 @@
707707
" return (diff,accuracy)\n",
708708
"\n",
709709
"(diff,accuracy_4d) = analyzeResult(result)\n",
710-
"print 'Accuracy : ' + str(accuracy_4d)\n",
710+
"print('Accuracy : ' + str(accuracy_4d))\n",
711711
"\n",
712712
"# plot the difference between ground truth and predicted clusters\n",
713713
"figure,axis = pyplot.subplots(1,1)\n",
@@ -839,7 +839,7 @@
839839
"collapsed": false,
840840
"input": [
841841
"(diff,accuracy_1d) = analyzeResult(result)\n",
842-
"print 'Accuracy : ' + str(accuracy_1d)\n",
842+
"print('Accuracy : ' + str(accuracy_1d))\n",
843843
"\n",
844844
"# plot the difference between ground truth and predicted clusters\n",
845845
"figure,axis = pyplot.subplots(1,1)\n",
@@ -925,7 +925,7 @@
925925
"collapsed": false,
926926
"input": [
927927
"(diff,accuracy_2d) = analyzeResult(result)\n",
928-
"print 'Accuracy : ' + str(accuracy_2d)\n",
928+
"print('Accuracy : ' + str(accuracy_2d))\n",
929929
"\n",
930930
"# plot the difference between ground truth and predicted clusters\n",
931931
"figure,axis = pyplot.subplots(1,1)\n",
@@ -1001,7 +1001,7 @@
10011001
"collapsed": false,
10021002
"input": [
10031003
"(diff,accuracy_3d) = analyzeResult(result)\n",
1004-
"print 'Accuracy : ' + str(accuracy_3d)\n",
1004+
"print('Accuracy : ' + str(accuracy_3d))\n",
10051005
"\n",
10061006
"# plot the difference between ground truth and predicted clusters\n",
10071007
"figure,axis = pyplot.subplots(1,1)\n",

doc/ipython-notebooks/distributions/KernelDensity.ipynb

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -82,8 +82,8 @@
8282
"\n",
8383
"# generates samples from the distribution\n",
8484
"def generate_samples(n_samples,mu1,sigma1,mu2,sigma2):\n",
85-
" samples1 = np.random.normal(mu1,sigma1,(1,n_samples/2))\n",
86-
" samples2 = np.random.normal(mu2,sigma2,(1,n_samples/2))\n",
85+
" samples1 = np.random.normal(mu1,sigma1,(1,int(n_samples/2)))\n",
86+
" samples2 = np.random.normal(mu2,sigma2,(1,int(n_samples/2)))\n",
8787
" samples = np.concatenate((samples1,samples2),1)\n",
8888
" return samples\n",
8989
"\n",
@@ -383,7 +383,7 @@
383383
" query_feats=RealFeatures(np.array([x[0,:],y[0,:]]))\n",
384384
" z=np.array([kdestimator.get_log_density(query_feats)])\n",
385385
" z=np.exp(z)\n",
386-
" for i in xrange(1,x.shape[0]):\n",
386+
" for i in range(1,x.shape[0]):\n",
387387
" query_feats=RealFeatures(np.array([x[i,:],y[i,:]]))\n",
388388
" zi=np.exp(kdestimator.get_log_density(query_feats))\n",
389389
" z=np.vstack((z,zi))\n",
@@ -441,9 +441,9 @@
441441
"\n",
442442
"# classify using our decision rule\n",
443443
"z=[]\n",
444-
"for i in xrange(0,x.shape[0]):\n",
444+
"for i in range(0,x.shape[0]):\n",
445445
" zj=[]\n",
446-
" for j in xrange(0,x.shape[1]):\n",
446+
" for j in range(0,x.shape[1]):\n",
447447
" if ((z1[i,j]>z2[i,j]) and (z1[i,j]>z3[i,j])):\n",
448448
" zj.append(1)\n",
449449
" elif (z2[i,j]>z3[i,j]):\n",

0 commit comments

Comments
 (0)