From 7c9cddf7860c6f7e242ca0dce3334cf2328a8022 Mon Sep 17 00:00:00 2001
From: RichardTea <31507749+RichardTea@users.noreply.github.com>
Date: Fri, 15 Jan 2021 17:25:36 +0000
Subject: [PATCH 01/30] Draco test file 2CylinderEngine from
 https://github.com/KhronosGroup/glTF-Sample-Models

---
 test/models/glTF2/draco/2CylinderEngine.bin  |  Bin 0 -> 155392 bytes
 test/models/glTF2/draco/2CylinderEngine.gltf | 4758 ++++++++++++++++++
 2 files changed, 4758 insertions(+)
 create mode 100644 test/models/glTF2/draco/2CylinderEngine.bin
 create mode 100644 test/models/glTF2/draco/2CylinderEngine.gltf

diff --git a/test/models/glTF2/draco/2CylinderEngine.bin b/test/models/glTF2/draco/2CylinderEngine.bin
new file mode 100644
index 0000000000000000000000000000000000000000..8ea9b3204865f3c4a7e70b206e2b3f78a7cf5127
GIT binary patch
literal 155392
[base85-encoded binary payload of 2CylinderEngine.bin (155392 bytes), truncated]
zsScqV-riA3Uao86)dT*-L=VM&r>T?{$`n0mwbl6J#gG}Z?)s>0TSUU~q~Vrh{)gF* zueP!ei72XS+vmdVcmKY~k{AlGrga#{&xp!1d-NiNVmGa+F05cfX1m5;g01lBu=pWm zBW9HH2X@mgh~!84Eu7ChQH(l5wy&N11p7X-m!hM~rOTEI+nA1a(V&2Bbtie9^(yAu zCvL9n7183l)<^6NsE9IA}>du&nK$uv@1~u%|nx>zq4^Ec3Y6G+4P-G$}sI z+HQFLE@@;~2{E3Fe`K-q_Tx5~DrCsLpo35G&y7ao)}oaJpo}dPx)UOmfm$t)L_*_-^t(yD*(S=*UDC^h$hLrAgF&aMRkm^!d0f?KJmc z8sC&Hz3>|+r@pA&F1NMP1NO7(#-NX%R4w+9{F4FIch%>+XaWO!@4{*Ecwi5X+3@op zZY#1Xx;Y%gekP9JdC4RAel3&&HAQI`+L67~v_oTA{$mQsw678`yS%Qb< zt#T4yRegxd`__5vqqr~h30w0Rd@{TIJweMYMV6J@hVt-h6>vQq|8q%l$AS2Eml*4$h8L>R z_t8>ONWU2#J|@g#LH%2>K$X9E&{JgM6Hz#DvEwy!8e(r=lg&+`>G5;&c>~wzSJmFq{xfQpuekL-=HYp~W%vUO!-)mS9;>#64K{_Vz$-=`gqU^ewWO8p%1 z0W>W+oKEsCU%pSPc4+psgz!gg>BN%_zSpg74K15S{nzz7b4Bo9U<+JMOx&y%4hkNO zyi(L3mVaVtxQU?Kc*IXdHC6I7qm&;JcccH%mQk_Ig%^U~v){h&MCfjRtKNCJAtkb- zQJQM5T<@tx?ho8Lo(o;8#XI}mVF?I8gaL0<5yHc6eW~?gOI4cBZvzKOiSnD#A zY-an3uipDQb&CmNk`5t8Sy$U!N~xVej?Y+sGp$8llsVR@7<_Qv_OYq&_s>bCp6Err ze7sG=dPmBM?4s^6cyTWHz2M|Z^0R8Rp8+MeitXB$a$^BUX$b%b04X9~d3rC8$~t@tW+zUY2$4c@;{N zy!4KUyEJN_y*PA8a&t|zzd<~$t1}vGUC}Rh`Q2P7S+soJVFmobyl6)(mO=-EV=FTF zTF6%ymtM#3>Bj2uIfA@D3xSpGt_h38MfF7-Gg6W+Z~lH?ABXM5CgLx@D0FBBHj7s6 zsu$JxyzW1tFR{#~tJ~bNApS_8kdb}oj;I9|*HlbnN9umYr0u*#5Tet2dL3`0K(@KR zmU+)p&AsxK->!;B5<*MuV_YUe4C}}nEBMgy>|*^~%dhfxNVjpfZ9cDeg#sy=U#4%f z!{bEc_Pk}IMsw`4)Z>DEA2ZK4!Am#!s?cmV(ORf6oKuR7`R6${2w%+ISdvWLiu;Ea z0}4kq_BXgrgu+H8%?;O&k$;D`f-LZDB7KjQ3@TjA9eNl<5BS&r-b@;8cbL$kmnm*_ z{n{Ho-5=@s!mOwZsgQtrdtkkVo-I$VBHv=@JtES!Hh1;BPAIB3cDD|Hrv9yq+E0hL zL)mgp ziJwujG1uEePZ<3APHCmAjB#U&T^59w`17N4aQ||`lc5V`vkr=N+-!K$Hg0}z38Q|o zRJ%RPOYqYwN6Pff%rpIi2veY20cvGXRPV1gs2L3iR44f9Ujv8F*Yvo3q{3NFape#n z@dj|;+irbg<9h;&7d1m1FckB^X3S`|&c=d37;(2v^4XBFrah z39RL=KcF`pijc2jAyLUm zk=-DdycH`(Nf!aofp=d|F`{g9$+^3q^7w}8vQTRn>r@+_OMUH9DP(WWH@KC_Q_(Ut z7l!M6$oT!}CAHiQnYou^&w4eD?P{dwOf+D2y9!_T^(oPJbbj(uS7BmLzM$;HlV8e* zN#Dn{KMBY?Q`}5`Pos*cBoZsiBza^*x^rYBsCpaNnlrV;J5|vZyW!GJ^hGIdtrx2) zH|RnOx5wx=R0+yXk*=+HGLJj2zZr3!Kakq9Co^6{xHaC!8;31*{;XW*^bm=h=;;ig z3lkioNRxBn){GGsat-IB_B|FJc(onyt5Zgkh9fSxE4RGU1kU{0K;+NjXwcxpNb6RE z>uq;GD(xVvc-o}GUiceph;ZxB_4oRGtBoQeXy7XnalcDSKQZB^e4Vy;j#-0vvGU&0 zw65`YLrn!m92M`CP@9`_J)IYtlQjAsqe6+oDNJXn*_22u}d+Ti$Gd0~^Q(sm0O!v%mRpWEz<6Gr#wr;ukEd|4U?r6RNOql1VO6eZ+$(M76acoauIljI?6nmY}-L{VxWGJ9Q#`PviQ_y3>3CMP&8Y-e+egpGF0WiMP%HL_&<;ua!HJ4S^A=6inUDX z>9mAy+v)S*%Q+G^do}DS_;a@Ph4kCDE^=CsVZZYbQlFT^d~;7sy%viPyF8IH=$6U% zw#_Z^Q1Hx~rpYhuEm+%)Zfr$RE!2rfO7>eJ<^5x%Cr&D`W<5NQTK6gJ4Kd9+O=*5s z=JUsL9{f0;tioF?o@ZHkA#Y1ZhYPFtY_zL*sZPx=rQaC_Jfw5V4+;y$?v4B9Q&Tiz z(HsUXybXDYJwi)Iat8>5>Dx1Uk+vt1O&1qBIK8h*Zi~>@Kf{YpIt;K8R~d1SIn?AM zJR6&GB(t^%Ul*whvvHK_sn71i`t=Ex2}tYqtVXLoQBmA`CztVkPB?`6u6wu9cmNc&8M=%9|Tf+E=$T6R0ytkBF z#tCKk8N<_#LkSJf3T%TY?Dd&4~4sr z-3zzxcrTvr9C<-&GN}#6PCSLNRtcbePd|EpRxq5`xT*cTj{Yarq z{nNURM3YMt!O9t%Do34xLEhcrLEnC^bgBhX(Rhl;u}Q^zxD&k({d()DNqIc^20BOE z1fRxV-Q~;srLBDZMH=r)wl4bjQK5Np@zvTYkeM!cVlO9=wQPg+-$_nVT51?XNh!Q- zenJGkTJ@wUi<+Qqe3aW+)VlaEjtFg6V9u@yL*@JVGrHQm-+`-Tx#W5WG1mcdTCiHF z5;7PCrr|4Eyf+%0hEBtqyb7YV$2Ycu#Q7_)UxHUYpEH&AJF&Cs=2AFxaS8h9T|Ylc zP48l$dd&ugmtYE&Z3S!es%F)7-=1=4GE-8J5upu!-6~gW+M@Xx;?59v%rdB*s9xq0 zzFC3?gH}FRYIJpIv6+7t9&5$U`u)rrPtaDQp@t0C?cGQ~YI~sn=c_embquAP`Wp$y z`lS_R5?=h~3z4pc*f-xpo3aAse+@o{>Uwyfzo+w7T#)oEbrJyDD?WVo5P9%WbY0@{ zol|^xT9$t(?o)j5I~8!DelKXhLBe#8V>lJ?7`S;pW^-j^$^|PESkqu(HI?u&wN*1q zA(Yf41i6f{{LmBw>|)8Bm3;Z&5p(0&`XPN2+2k6gJ!%*A@y}eA^A}Enlte<22!!8J zjwb51JvCo=bmCh# zPdu~q7ZopLI3W54eO+l6{FSLJZHQW6c#X+K~O||QC#^jV*us|o= zXlp|FrQC<2$(b?&+hGW1lI}V!hjvCTW1_+RyWP#3_Kg!L6dX>9>6n?H&HR0|h|9HKnoN7^9;Vm)Wb$TkAhl 
z4Y1oGy-3oW+Cx}rpwwk_Vt%6i(}@#yP=|97N2r@Ccnbw-zxq4D>v1XKHc@qWcK@bs zXEKJGdhRE=rSL0{kfd16B%#mq_dB1D6q>ZXmi14vACPuulWzyUoxP%YWT!MJn-JwY zq+>H(m|kjmf4?`*z4~x#_`455z79n`=_@e~KUqPu4vjSh8xkV?=me#+)?6=1vmEHA z(SpH+(K;jjLi(2jUkhQ;XPvh}`~iaePPnJJpM&)5Bql@CY+cOS=w=*n8l8+i`nCH` zp7&ZWR|Ox{PGyj{&C&~teqwReW}yrzxmC>QKgd-WQ`vEiXi|l9Zj-B|{Z#G9p&ZG^ zc4`sZld0S^6!QuofZ4lh>%9Bb!tXd~`-*c%o-Xmm>vAHvvHCNsW`<69wCpyFUvg1@ zm^WX4z)Og!r~^J+5pg+^5y}yDgcM8p4u9uwN^5tGkxT*q?T>UmrLi_8W%`p2i@DafTb%_Tl%-PSQUNZaW3m$Q*!LWcsw@Giam zE7wyb8EzgBaky^hqGNlo|>^F2ex335o zEFemL_X@+C*Y1s3GyH2ARlDqd(O4AGbJNP8A9mCQeV%lCJ#M)G;$UZhtRA)WRT{n{_?M4ynU#Ia z<=Wj;r`$)ju#wwS5mlQU%}gxK0wiqTX_KnY0y44TanYh|Mv^O{ZZe+AFfFsv7xD$g zzk6N?b`pQDS=@c|dKMZWf;vx-Q;&h}8^2Gj_#6T2 z`==W^Mf^YKi2p~}MlBxp_Wv5Taf>(rl^C=ST_6$w@V1nu!oxy=g+^}tM}$T;@T(0h zEc71%8oCh^76b-^LC{PH5!O3y@94!hfC%6-fDv>|B-~RS8w%N2=)dP2t!#69^wW6F(u4ExSBGbXX)+i>tmJeCeRO=>_JX#I6VgYIohaTW9&I@bHq zd8Rd=2S>3P<7oB-7sFK7Wsiu){*3vGSjU$} z$W1gE{tRu5rFrQ6$ci{UQyOhu7VN)tBG zXnN|4-?0Unn{J&S>~p&WNB?&CvOG}~B#{dT9m`<@j8e%Kmc#R&^SXqu>0Xq3fpwcC ze!*%fws)^PQ|qdIUm?mqXIAA0`-p1&?7xlt|BcdV8F#bhI|z&A(^M(_&g&Mx$QTNV z_kcPM#0?^g)e`ZbbmZ57q-^azuHGuu@0u6z7(5?|kyh>PRWaVuej~X-_m-z`Hi<9fdNDen3J)Y>e$*Q3g#F38?W3sS3W3EeA z*{l+i*b&gObYcp@ER5zpRs*)$W8fi1qdc!kD06TrDc+N21#0HD>v+ldor&*Pe&hNu zc*k_r$ptY<2-LnQ`Q5yt(}wSm+TAeVSVE)ZZ$!1JpRG$^H0#6x!rO7ABMEP z%k=+TAj}?Cq8`*2)gE`!AFrNRe6MMo^dIf-ZyRnIF#P_JAhnU+7+=jc@Qs2V{$JP2f3>DUUqR&29i~X3m*oq{6Q>fb z>t8DT^YZ$y#s87k#B26$)OCO8h4Lp1xfF|JLgNOCl3PCWS!GmeJP}4uUBsK-sD7B?UT#KuT6;( zhY3ewS(L)_twHUzS6J26Fv#d5v7!D79W=kIvx(_r>@3uZk&p+8NOYkO7w9Qbgx{5S z|EZpVr!pVUWu;_nc!TErKurrzh22S>;%`3q@Uh*3{+-8lYjza0XS~LRmC78Oc4Ihf zyLTO`JABC6MGvw%i>xK(xx*G5oxZ+n+m6W%9iv56Il*gm*=sE>4XNHBs){uWzoCabKLynxz`kc83?LkGe7xr_8L5TF6NYN+f1UIHo$Z%`pBSYNb zr$&d|mn?H1B3T%0yCd)?Jb=mQp6T)}?p%SUApx2d@xd+C69QP|i{jd(V zSh}owje&j^lhoq}o?T`xmh2w+F#Y%|({h1O3t=2A0y2f^4;bRoU*cUp0is80S7veK zhOAuxD=`iRVB>&g@B?#b#QKbjHy%8$@aaZSQIXg*68BuV9mS z@z9VuT8W!Gm?wlq_V)dERFZzfr8W3TR~mbKfKR)>8$s?y(m%@@q-$@+jcIx!Ez|v0 zD6NMk-({Zj6#IvE1;)F>3xnOZaR#F#C2r&QpAD0pwaQ_clwRg^dduo0Kj|Z~^qKPO zz?qbQI#AP8Z8chX0U7TY{nziCGPiP;hTKIFu>1j@!HN9~X>v8T&YVtu*}-vnlezsh z5oXq%>EmPFcD`S`PQNO(wTQgL`Q7A8slU0~<_AapBF!W5{a7w#jN8i3m60Qn6f*Q$<~f3{Z}gqGNlpg5pi*N0T|VrY<-R#B=w!n$UJ zqJg!^D#yi=@ippjTV?^R>SY~Sk^WRxxyaoi!I@TGDG>%Ze>=%FMIFZRNwf)mk7~k54=$y1Qd-lVg22fQ4?5bUw${A9L==m7Bz5Yf!9QysU^@afM= zZ<`CNNNyb+KfeZ$ncV?f^K-|TvJ0hr#y8m(d|`zJ#rH^#iJ8{0Fj-V@!e$(g$w}z# z@G)2fUUG9e?^LOdZ;*l*n*$3H-Ql4h-3|gU6B3SVQ;*#;ntW*qP~oB(j_p&3pCwio z=>Oh%|9L0*FH{r)j8I(97cNL#tolSkU6%Ltbh=!`fl_w(qyoh?j{dtZIaz#tnIk7E z8fUr{kZQ}hCdvQoC3F`KqJ2(OGo_O&PiQRAenywZhWxAl_#Z?;#dmt_)%NxLtZ?gV z*a>l=VbOjwzl9uhyi!Dlh7d;)GNYDtQ8v$Y7d>qIy{)d`>f#q3X|VDu&dKfg+6N7^ z38zJ*Mits2z2atb%(4n+NtT+!vA)+Lm`i!fRaBJ(U6mSr7>ydjZ#7JXaWy|8(zFw~ z#piIHK0b)5m%^JxJ<*H|zULY3ysLUUdlRKG&B^ws1?Kx4!Q_#T56SVKP-DQIbmi^7 z(U{r3WbPGjZ*FBCT$qs7T`cE|vXN*!ue&B?_4&IlCGU-DxMg}H1p|_IjyoYuvnuW6 zH1n5#)`H~eG>+==IQteg$S|#?G}Dtds9hh9IF{#=i}CXa=U$0v| zdnumzi=KslN6Y{0V*gE*3=s2P#VCh6pQ@xJOP9+VJtu}sC`*;R)T`)&rNt>CatrUA<5a`+epmorpnfXTgeCz_Yp8QG zoiXsQBKK>d*xEn4+)jpa#6yH9awNWaEPK1L+by>KecD(TJDI~b)l5x`c1uRDFmjC& zZEOgf9P&P?(x$|Qd95eyb`rkVeGYgRS9j2iG5i*~@MY0HekdcTIMXKMDB1dib_hxH zTNHe`hMk}&2bWw*vul&QW=eZn*7ROY`EYboD-GQ^^km!z2Jb`Npgq<%oRiU|Td|!J zy?V+68n73$H2_)cQV=dIVaP0N~S3!@00G zFg*hhlC_aFzo~9YrADm$5|kPFK5>;vd53G5NLh>&2_5-r@_W!6XH(tbqD?K{aIOq9 z8~#J?B$56}RYY!zY|fFuBU-)*+k7-7Cx&*bzi~P1Z_#fXZRO8=d1p1X16OfkE1DaB zIwl(`&2rLmBb39T;6mMr=7Z&aYtB0|E)_VnH>iwE4AUIL^=Q1SDgU9<=>7#Nr+B`-oLQO;PY1q{T5@-78r~L3EaUi0UW!5~ 
zO)@c_eDfoJnBEP_Z(Q@MuGv=n)&?h47OXcf%aL_8vRL2C**9yl59@Jq-#%-C=QnQ8 zJ>kyt@2FPb?8aC%DqN3{|L#W!W7Givv;$yZnC;d76_D+pf!Y3teT7E*bq9#iJn>zw8Burr5YX#%dg$$#U1i+{Sz*ZqW zV^+ZfU`az0-%UPZUy@@{1m^vK)j?TDmGx9W?Lrph9V1CG41(bm76ih=fMLiXLxKJ< zWW-o`Kv7sSUTz3nAjlsDO&5khSS`LZuolK75DqICHayf!A|7VCHV*>d-COPtRAY~z z75)(w-c3LSIVXb#?L$O^=_~980%1`jA%H+QZiH+%ZYpPRV4*= zXO{nR9R+%*{P+3>&N4(x1EQt3U~2%x7~7y3fFg-!B=8Cl4phzt;MTx>1sGE;0@2st z39KD)Kor2V2l$C15I|2%ZUQHYy$sj|P$0Jh5ZVA}g8-B|Rp4F#RVauo9)z3$5+48nDnKYzK=DHWRSb|d5r~`tguz88h8=?^ ztOjCL0};pIw3C4E$XvI8n9o4KR}iET2yO!^od8gB02~WUk*c5TF2oCJYv} z7eV?NgdG7wp`t?}A|Z?h5L|%FjzE}aAjF>l<{p6X0e~h7m=AaW;@tqzzrvBm0Ac~9 zK;R4rE(QeX0-^4K1o8m{6+q^45Mn(Lm4`5o9lryh^%j`VfmH@XS!W0DvUA_DqtRf6 zg3vPs5L*S_E(stth`_E3z{Cn6wu$H$h$<4n1j2HMz!C8ROL)9XdElFoh%S*xgRw{= zuyC5O30rWu_KE2pDS-!+67%ng&|ps4@R8wf*kBKY5FW$ht+=Ur|@nnr`&yA8ex4B9#c`Gz+$&2*o2y;#GY_s)CQ3SHO81Gr7J*MFTqS+G^ zsg<_}9$6d#-c0OiV^^Plwre-SIql>N11kWwHx>e^DG!=;M&yb#B)i8WwR|6#Da>4X87fx2>xIFDwmgvL#?L9J&IY%yeSWvT{V7u z6KC7l_ZjejmAZ(*H+G4b@mC?F&i`7?r2yY;i4~5J2!s!)l!3&Lp_HWsU zckWge@K=gC{v}t_8EX^N>pQg!yDT%@#%d0ddoxb-8BF)k<>gGnOi#8}uE$LC*!xN| z`uROjH@#B10c#uP^nyl6^^^i}VHFMy>pOERdLOF`avGl$^wpW)FmiesK zAS{^rgP-ljH`3WzGFSdvO~o6T^6$HWn<5X^UN0pk}@LDv@4tkrE(uP5kYQV`ui z7O}Y0$@=h_H4%;H0HN!qIH5-IIrCEfkGK#f)JW5-M3Rn=I0+}~G0S(-JxT^8xxR{` zYb$p@;Fp37X;alZ+b))Buw2tudE<+PEX6X(2KPH720l&DJplwKa?arV91MPSyKh$| z3raxW3TILncwXiOgv<^Ggyi#T8Qt}zdx{gGylX=vm`XZ{R|~bPTVQQOJY4km(r)RM zozl|S3-dbapUQ0hAVL%#Wyx13788}`F5AYcpdh`Oh!JY$)K)*|c52kea)X&m#~Q6z zsadJ_lBVJ=2RGnVQ}9!@d(S6Ta8l2Hxl^l-=m|i>0b(_T=FKr;dx;>tgqZ@edmu4)~scrvgLn!*n~JW<_bo zYl^oF5z`1w-K3>_Nb#a_OrpEn#a?+b8kUb0XxhdjV zwU54%Gb@S0dyzJIqH?k1vw%8qkP^q-HvLr5K$~~esVnAZzCus*?8?wS7l9MD(DxCn z_Imn!YDrJvlQ{U^a-3FSw0leD&|apDkAnB@CUnH5FF_?{-NrHyCb0jB9Z%Z-174N) zJ91?o!1~J@0PJG=l-=ep!iAcJ%2oTJ2A&uX+t^nWr~@Eq7?lG>pP~%33^e<)$C~P0 zwi66bP$Kt(`-uIipErPB(-XL~OV6az)OaaS<|whMm>~U};^vJ0XH;|`i#hRXCxJBI zyzuRg1nN<8S#c*U`rFyXpS3B9nK+&{R_1Tp@JdT*^mk_kjn+Jc)Jgll!1Z*F^rWQU z>td-J3H_C!)zKF$^I3!wCKGP_!iEw@N=$}FGXf5L2#o-wdxbYX#PJWhch`RjadIP5K#GLEBv`QD4Sj&fdS&wF%4pIH!LSFE}5v zK~|Dm#g~KYkuAjdY?R_WJxYg<(*L%4eOEWUSwWJ9^eTi7+C7=G&beSSm1 zOz^QB7@hB_HE>i+DCKp_^?vAW*Eclsjq7mObmka(QZ}3vupPZ3PYyNT!`U3A ztn6Y>& z@vC_#*fs@m+C(~0FNnULa>EHeN(?*@_B)_ZKfzuql=@e}a(-LnP0Z)2GCKNpyG+}t zIE-`#zj*lltSA842Rzgc_~GF^O!MHcPbq&ci?BQzF47>J*D zGedn(sX^AtX^CmrgML}t*gV-?JU@Zz7BLU}4 zDsS-qBgg2y_*0A73CqukKr8dJz|Rv3ZGP8#!`o#Y0xE(Ajqec?Qp&z<`xjMqKh{P4 zH47a=BhUqTpK2H`5O@rTHDH=(kX(7-2N*c|Gf+sjWWX)VSQ%A+W$&BaBdIC5^n*kX z##R47k=M72dRZH*N+yG*;{Ia0i>ExVF=bwc=o9HTOt)*vMOSPheQ8?BwhMo)4zI_G5@W~kLMQdsT!jP+ zufDU;BLYdb{nSA5%K?II)whc3lQo?`?~79qx~8So^UUdzWoHaGgyfrY82NmUqQ1ny z?6>8_vQ238)}wF5cwZ;+6t=^BXV=j44;}kJHfMGpGoi~!1^zN(_+dIwJME+kfjCmW zpX(Rrdsi1^j+Z6>x_b!JBseE}C+VFA7Y#@q$S82orx({}$+GE0^nfn$^$tRS2Kry$- zS~0wJtc=KBUhbLAP8{(0KwDDk0O1XUef`>$_svFWxB;0({VjvAy+MZMQw)q}Chdzv z)=yUIZR^F8GaF!c7;=Fl5?)}!#_W$-4H|phPJPf_GkOrLNXM6K$rSglELH{sn{cs- z5K5%@>#DV05?8y8i3C!SuxlpuEb=+Z687lfR`pf$0@=lo6jtZ`cL0z0q6t$SV}{NA zi(z_ufOrUZv7^3IgJ{S%#bEP9cgz-@!EIRsI=gXuB#V?o#~nSVEjl5EpZ%26Wes-k zPE|Ofp%bHiznd-@3yC5XX!p?X!a>Ik^!75VJKO5xxqev`*xdbatT{pz5lQ!vIKOiW zX$&T9cY{TKQ~X`{m-^o;jH#cN^}g(Twnfu5zq)+(^?Q(Y(f||Mczp>TM_BhWp{9DQ z`gr{*H8dU#S}%x!aXpILHJTuf6cS>yM7 zD8HZ7j^iB)4=JE7vITC zDjL@}NJ3>P1--3=i#>{8buObiOJN@Uv>?ZWOF!tt4r+b0%^PfWb$ZSi$ap(#7EvCQ z(=g~jv!y&w6$C>kE9R^+?X%Oz2<$_gYd&@*9wdPdja)I*`9iBM(MkhL#4bHWj=TE~ ztaac-hLk^}o$7zQt!-NVUba0fw#I+}l&dU2vyP4;IX`S-^ZTED??*3{Oac+Y@TPdp zM$(=Lzh^kWF09RDq_ot7TJHkv#}|1EDW`+6ZPxT25aWoH$SoWfWF(NdBYQPcJpx4h z^;2L1l|NiXCPvDQXbwcs>cK{jqqsTo#8RShfYg^{BKX%03?6N?&=!i6xbMc4Hy8s1Q!B7 
z1(yyQ10928K#ic!u%Dq+G2AfRIqX|FZD_g(s1fQl?g20c&;jZIJE*`l03wtuq62t0 z94Q0L@S)z~(4BCRZKZ9Jma|fH0~{a=oJ0rMX`lm;19m!|g7tn@1{b}D#=o)pn}7(= z6E^cUqm8!R$%060nJN9$2H*pfY|WgdmA*iI(1{V@z$|)r4OeFUt2_X00E;uN5(}*o zw_Z_FG&dO@OMjLyZDj{pmYFS!uq`(ts0vsHb!}(S+Bs6n@^)r{RC1HCWYPBbUqvTr zNjnSwT~Sdw8c&J#4Zw^=SWAfvOiNoy3;iAs7M50$*2-cb1CfCtEVN)eFa!+wn_I*U zriDQ8AYiZ-lmbDczkwlUEM&A0XfgZ) zzlYLf=un_g1N>zq`@8*nB4j8#^aq0Sf}wnWlR;qU9*T(!0eXlFRR>xODwhTCZ#Dug zhy@u685c~b1c7Qt3T2=LIg|YbfebB83PuqA2Si#kJUsNj_>liv7EBA);|9|}%?QTS z`^%3&3&rJ)2hE2hLD3TiL**bqQT-JGfz}~31Ii7-g|Z`q2_d9lN-_}SuaLh0g=xUF zd{71u8w(p)llCK+4O&+SU?DIj1R0X43FDH{YED5?Qp3`E zj(S(Bp|_qfs@{{i+!>ZlVyfC~;wE&j^FzV4#pXq~LsJW5^Cd<#>tg6C9?O(Rkh)jk zoKnS=(w!=pj&Ld}1^<@Wlc#RTIbuOq4x%B|8Fgp7*^E#~o96EYo@ptJ6`}>j=oxbJ zhD3WyUaBTHVk!jQ!ZU$0#5jTBjhl|(`gicuMH=lvj*5>RW#DRHV{mH<-=TEgB<}tn zH#8&z+;yIevXdlk|C=;!1nSQ+U92dxOHTaXwR6ft2zg}X!jiiY+FB8+)_orTB6#Zj z-kOu^-{z`U8pM=`!cI&7tvB6d)?5;4XN#CEZZSL9doTE%azPJdbv8e1H+}&xvCqj( ztuPtGCjGfG6+0eHL`+_-3VuNXr3oQ1P2-Q%oN)%5_&2txCtGVOSd$UOzEoVIDs+cs@a@BoxiF|;kS~d zS}%|~ev4z7q6oxDE5}egK|q^>J9MYjnYmHZaa}BE0R*+3Z=72tY5EK?8wFgN8@FFN zOHcno@E1|R%_v9;T(*dJ6p^5N zOUNK!Hlc17dEKf*&q|pUf06CDsf)I5@#6Cmm2WOv$Bota62pc)Qn7UP@#)fr*V|E? zJqB^cbu*0XsYhK}%%!c4KLFmhs7v1U{LZUuX+GMh^HVuR@w%7vFR+LgHgiRo`39}A z79RZP$~qbG-w9~fhcJ>i_gdFUcpoJlb0p97JeOcjo|ok86NLG1%=8kjFS(EdD${t% zr=~afENI_FVqXmTY47H!5GMLds2c{E_oY3!8elOr% z{a7%tMW@_K^tL%t74rt2ulZxy4gln}Dkj|+0k7j-85{4@thsd5 zs$bB}^i131w)?6&Md6PTY2~Dw!lOWV0>>*QJJqE2Xm%x^uaiQ-Vu0Ybbk^UC9pdYx ztMR5Q3lozSW)+7|xzO~F$kLxj$mHT+nMia=>YNJ$*O}GuvDeqxyp3|eA(P}McH2%S-)Fob-|0ldZ!2xo1b%4Ks2=WPZ6BA!h~`o4I~c*E zIn-u2PtA@IEy&h4m#O0JU~%NU#Ocoqz~b)ZJ<%9)_DrIYSW-zLvINN_=PM>)Q*$*+ zw}MZWrG*u<6@@;b01E`aX2lBe(oW7BCXC-R*;R>Wf0IK+#% zH*d7E+76E6d3dwisv?L*&~HVXnB?CvvX^=8yEig9&A*EaKeQUP)6o*MWM}RAiX-L3 zmC?M0`E^x2O@iOrMY9xjdXcG|eNz=Tewqbl+9-6D=|E)Cl|jbKMwzv303TsFjI?Ru zWU)iL<}I`PnQqGw)#U8j^pN+IVTs)CT292up7QVl+}ul=h!Mjh#T#u5jt5wc_>VQ% zd8dXNTg7P*%2Y1k0G=SPG!v5O$TXO!;SVSM6p|c&jbPgkHg`=aYAA~LHF0fjygD@|7gAM%q1-jedt~CZ0FYK5^SyQxR(NW2;SmX z7?r?0qn(b_y;gSB&}4TUkiabDJUWC5yu+Q~f*=L}UGkOh??5garQ7F1s%`ikzkIyx z$Y0*8p5oI5CMP13d|+GfAfI7(OGb$ya7+6xM*o@Vci#6(Ll4svk=hIbf64bdRLo%3 zu(+S`pwwFIZ{OrlwBS31fLL(67@+0H`p z=?ntzO6G-qg zkx8q(0n3GK<*($v3It8r3j*XN!okOyeRN#=bd{@&6>iO&bCc(pL6~HO@GnIyKe%p9 zGRJ5Q1^AyF%O3IqFKh2X!7UlLX~HI7PFws`vK%agVSqdKzjJ>I3f09lw1&sIs>_O9|Cy4nw?GSCqqV&Y4dPUIA>_0l7Zj{TxFX8wA7I929QQbW`CeK`3O zehL2@C!0{WwD4DBLlq=M!);_xlQGi`aQlx})16&(VW~rywJXA@+&3C_-1~7q7P%D#uej>6PT)$`GF5YV#O-o8NYED zwULruPav5oO8YIQcM6i`iZ4&T*EjYLLp<`;FA|}9Tz}U0=DMiEgaJms`%h72RuxE-RDK>KesOwRDUF!vp-2 zgVEUu1{K(5FW77G(wLBCatQ?%S!I*WTh*WG2^x?h&3(qvB=a?X_9ucNM@SOW&Zh>w zx)P??PiX`Mm;Ki^p3n&&isX#<)`iZue{>r^wv75{k^U4^_`#mJf6>l9D6Kqv!Oh=a zWvkQ~xdTX&_1j5c*IHRuNO9J;pN%%#qc`S;kq{`$Vhdp6Yz|SUBzZX#UAoIv39DfI(n(W)TQ6t^nRIMOhaLO30OWzsY9r@;f)82MN#M&%3qhtSr%ma`=12V%52fi zJK9J1hKbW|l}7r%Y17ev<7ju1y=Y_)haGT&HSoc1I3}l}E7JAlyy{LWTU}Y*AhZ}) zVEW^G`!$5xV-kF4Tm>g}LhH*;-}C5|)}V>Y5nMEq)4B`LqmTm+nT>w@f8O^8-p#`3 z%hNi<5x$>CxxhEYoQPDLPwVYiti0`KV;@`?@nXHu*8i;Uj5|`9!Q=3{xMV|A`9}V- z%Jcwq!a^$}ww0B+l+ba>%{qxJMn*_bX>6yTX-jS=c*9(7C7I5(&j^U+qOjlo1v{ri z#SRma&5HFd#|w{zXf{G+JBC}PBwVRL!3v5QQyQ*!TA-`c5PRPP3dg;x*t*bIVp9`agcb9 z!038c65J~k?}H|-8NWi3KyR5?01XpjRjse%weN;;pN-z?9r;)??&9;}5=}G2@oRUv zx=Mt{U&!k+5y6CV#bwvKDY;Di@rg4ZkOg-@J?E0rnHJxD=l!7VuS=*BwGUIemc~HJ z+z~VybD8DcjqbeN{3|W@^wML;SSV7&KDz|zS*y^l4pl~I6K@Sn*O12V=GFc=SO;Qj z?goS280*n!sv(&E3%<>=mo!P^`o>=Cc0kuydz-`SmT{M12Yp8NL<{aK*E^=ybqx;w zsh(2tU?z*+)O|iDN}*ArlyTg#D?6@7W{b!a8vGU%2y-NB+w0zo@MW!?4INCw@0FL9 
z@9BZnkEqk{1Uk?oCsW6;uA~rt^HTb%f{xwl5jM3@+Ar;wM1KEhn%8zoXU!_|H*?X| zdasOXFr+SNiSr<_K~eW+Skg3~6oyEAYLmfl)Az?Rby2;rJ%b@Hw-;x3O$V6^-ju{w zFSC<$;=ooVnqzV18!o2ENwgdXHbN$lC*O8T% zNa5048+>CJ95ybF=4Q#;zzm@{GLK{r^Xb&1?d2mIN>O23fmo+PBW^h#9vODd zF>le0vMFJ!vwK$)yP^y~sx@^h@5X3oVPS8e@qt;6-u3VnUO1C(g_mG#?tNlO7CZ@o zYx#X$XU7#z9WvmB{2?(I!80PFolfu5JH9WiQ)T?Kq&iD{*Q4rA%D7turOufuKyBwe z{4Uwegy)nW&JtQBKPh$+QyAM-#&aW(*LRkNZiP(|lYwk`uT#R)jZRNp@`}&}jVMu@ z%=6aPx2Tfa6HxrMbamgYv&3S9e5ai1XDFewP(hY$TajcbWRep#k%1kzaHm8v_)qU$ zSlMpb6qGh~W}Uvx&E+R8=*Z%)UM4u#^=iuO4{A7GQCajK{k$;#BG!a^b^9hHI!~{1 zyIh3FzNtd4cmNnNMQZ*eL56F_3X@iNjY#gwBj)OnUwIo+A<5w@TWn*SizhPuO(;og z<^SsP@XeISTv&fk3NE?B`UzWZ9*$*>Q5@&nUzUs#%6TBkWv``}T~@zQlW5mwjh^C9 z@fhB)y6i~$y;QCt4{>4Y|$t*H{O%JG1*s$luSuvW}a=gP)s~uk7i~p6q!V zLa7#?izX#=E-rRBj0an=8J5`cNt+B2I|9RaZ}-b}Hq=6@daXT3cAo^VQDLUl^RxIb zg(?nZR!Lo0-LCCnLK*0;!*dYuT9@OMb~q zQ5~(+)jibEdDA65mC{>KV{AATNS%<9lGqYgj2eELhUiCEnzRWA^HC9oQVj3qgN9bk zQ$G%8f!ilPr5_iDhKKAV%BjzI!DjkrUA6I1e-dcOcz0X{$*IdeWf{Tk#}rv{uyt4+ zOq+T&pL=S|UK%>G(x*Qm5gDM4w-Bt8A&Og-X$LB7BCB2N-8bP^Hv&wu-uDc4wgch4 zT%1rb7N%5Z@;8;qA{Od4Hni*CCNWozg;k)@&+bkCiefu(Qtl3i-`s5Rf3e!KUCbPA zdr;g%+*55sjrcCG(N1dnNXC?>Yp&x%VAU=B<+CMl`9sX@JU9pEP|PD-p3RO~nq61x zL#q1gF2p_GxTe8ZGG=j}uJh+_ufz5Wonfo8iM{$9du26Sv3I;~IGBQ8EVk5_vk_J8 z23;&{b2LJ4o-PV=MKz=t9D7Zs{f3Wpc+zJ<370bK9BV8!U!+5=vQ$|tfL=P~!`k&` zkJ507G>{DEe|u~q~dKA^T-5Lm+Czyxuy-97uSzE-bZ4i`^>f> z@=wMtC_;1VvCaOrEliTUMe+sSu<)cjA=sbl*Lo&Wj#N z_mNb7DXs2#KJMM0et^ti@rUVxty$>ZW(r0-wofY_B0oy_R^_-iYIXmd;?n-L{MB9o zZdc*)vP*Y7ou}Qlf=2Z}`NJZa!C$gc)+~`YrPgo}!V#Ppd^$c2@Y3D9c)Vq_?QXbE! zUD3@)Or)HoG}jeCIxm3nz^=eJLbPMZWl3P%xJdnK4ylz>*p< z(^?hrO>Mr6U`)q+&*HA)gae z5_}u#l~_y*rx{5q`6!%eIGM$ec&@D`@J4z8kW_&|Ref3D05-ifwC1z- z)rsSF1;{u{NFU#de#yCiG5&J&r-&HYyU$N6_sl7IfY;68QnoBi59VuU)Wz4|_Zpb_ zukRM8s7mY;jJr9+_ziw~ozK0_$;8FYe(B@o71kNawVW#pxO{KlifoX5GqW=Oo|?^< zNc?0@45cVWDQ(8O?NpU@gV@eQ`KZ-iR07dYR(tKwB%0XLPcBp5`l^i3HQsL@*fapJ z$5YXQ zk9#mp_4==9KfW3lU1%8_n{eCX2NQPRa&Hki0+t#{#kn%r(br({jtwe(J~YP(k%L_D zz;>oq%8fMc-axCmu+>l%F}8El%Tv7nHpZ=-$IH{-2q~RIx+dG|$YPw7{_UKLjybmQ zXFgE8QR`uI?nw=C;3f@~W3PNcdy-?=MMfj`eBw%{?@jPXwAz^Qc~lJ#Xh}UEt0LXO zv5eJ$^!_;I?G3Jv>ZmO@IOapZ7d+DvpN#{1X1Gc+0tjbruanhYrH$VQr|h)5PgQDC`FUU207{2PY4WlC{`!kZge{czhqf5_WffD#Nu( zL1eowFv2jp);C1P7Zy@M(>u&8*CnJr7(7dn6s%Fb;AB3(zBUhw%odfj)`c6=BH5$u z{6DPS1ymiuf*@?}1%kUIi$6f z_Cr*NWuwQ>v3CW(d?;Ma@4nIl%oyV0z$9Ryqdl3D3OcS``j49YDBkoo1Gus)ROR?$6d{^uv#W(KV_JBA&cUgEj0+mAY-O z`963A-V6_3=)LrAa<0F^>fmIpv5BgO?PiXW7QYsToZRx**)nINM!#vJ!Tj5HrL!Or zL_PlIYNr1jZl^^w$_`l)GJ1dMTmDaL7w}4i#8ir z4A3AE;%hQCvNHTfJ|Bi>Vy4RdO(i{~?qg9|U+t#blMbV9*=6@o+vS~*ua1M;VThl` zS$-TET*~VdU80qIc&Znt|LpMP69u!Q926N{Z7c*$pt|ttrzkDLw9mdvvTqzMUY-rn zbGH~R10-|)TUd6G9X8fEBA2SD5^7WkRS=#Qkg-k5URx)In(}oRtH}OP`Xwk?-(E|Z z>NSak85lyr?(V4s1C5|8fp?+Su%`iC$vc-wJ7w5K{sA}RLU%Rm?tW?Nc$?C5J)9ak zUP17w^jOzu_)~rvIgh_g+$pcWqM4C`xwSbTUSsq#?ANPg$+=B-9ZuB#ygn-7JK+;gWpU4|V$jOJu?yXEL1-KMI} z*}eYNsPO4|WW90UURYc+A8uTpxqQAc-TR`3aM`Hd(M@vWD^GR`=-FeW6I5dRp4KiZ zbM@pVq%D&>pB3U&2zAC{?~8Nu4+ADWo8~R;lY7mYY_Z7j3CXa-?*uOrnTv$f?)M5i zbh$E;%yv~)rKIHLte(BfPu!6dIu}Gefdts*7X24HY{?XaGQn&KiHe)*a^ajm?Gq2L zwOj0PZb>z3=+4wN;(*fL&WP+VWAjeLEvCYuUHyJ5OX0Q(N7@Ig)KFL=SkyO~+z!Pn zrVJ5Yv;LVFP}W?hOePlH;Md~{`)6(8oX7HV5TAf}S;k?usa~7-yXp(8VC4iTK+C5u zhU~EN=pRUaeJCLM!{2FZ!AyKNoTY{P!6MYE_bcYxlf`f0pxX+h1LHQypotF=IwlRg z!>T}AqNX*Hxyn}@#6tW!)lGm|fe**F7twD|A;03r0Y-7K zcXmU)7IKQRTUt&k)FD9z(Lrm29|JVPkRx_DG*IW~Pn&vDdc$!i&Cm#|eP6ZnQ@(+F z+^klZ{JhpN8cDkN>G!sa<*9Oe zYSVS=1yxUUpfJ@I*wqOu&>|LCvOgcKwj0gCS@eQKX?Uqgdn!u@KxAZPN6c*Jq4#V` z-KiwlJk$`}0to26IbM@1kaL?lzJ^&5?TA=tA*abtJ?WeN(D@1lH^u!J4fD(cYw1bW 
zsOjeTr6ce>vriFsk^(V3lJWWeE^NntP1x{$oe1~3AC7qQJL=cZ8>8j)cU#iyEPMvy z8BP-DhZdV<#pAYZ=GRE7fvAm^WbS?81X(t(n34&;6;9+`*wxwJ3cx=2&wnwlhsi2# zaev)+DGT$c`Yk>O`}j*(rvHAbmG|~8>?2PN&%9;ojq6w4mbGLBw)BF1bsqz&AUm}) z?&EcA)z&7;b%{6+)6L;*tt83A_-I;_^ZaVK^_;h!QFST4F&A{@F0FsaCbvEh zax3<|XQT}TQsJn0@oFQ*I2>gGn=A#`hAZ@OHF*I~yi2d_lUWhiZ-=flo#5Y>)V*`k zyjvb~dUzbOQmHSJjFXx4D;yVoOq@ixd(^-OVz`AlXnZgHE(Dk?8f)C>0iqKVZY zljtM=Fn>NryL@lea{7~;!CUoA;w;vIV*JyD@z|bt5v=*v671*Wy_BOvN-_Hw-|uu6 za0=yDAA9inGS>+Ui!dw3#|3#T8z!IgSUa&7mSmQWeTrY8~YAtu87kOk9C&soX+gsOcKMqtR_ zfn#ND!}^x1({m>anok(|wVoPQoYC73*@ zuWKXamh;{W2z&q7elx0ec~d}G)>s}s;z2kQPEoWI6v}ss3 z>u%rn=a1X$q0@O)T^AIL^9w2>S^rZob<<`SZiB)cVkt2FhL}2joW^^{07GL&_$fmOW<#z&I)I_31w8clDf2DObtpC85`u z`g(#a4=TB}w0Ry)BTv9)JhM|t+*G}VR&h0hMPBIG&b`|~%P`~2MhAY%%x@dM)9=)t z0D_=#q>i`Y-SwL4bpBrNRO#m`RLBBRRm0@#4_j%giZT3l4K1lohl~`}44Sq#Ye2Y= zkZMYJ#s#-EWXOE7BPS{;@zqR8nl7!>`_E0eLz~A1~mbw8^B8sT3%n&2p27p z-k;@?%LkztBdke}S*xn`YRjb-AkoK$Y3048$#Nlg&58{MpL0txa;<}3QmdCVk?k#z zbU+!j`hnk8O&O2){pWV6QoZWZTkM-7AmAuv!aDL~Q}@CjGS{dZO<4&H_SA0`27|jwvzm!H^CWodsXAe7;*@&B$YfOOdXWUPurNKvbMM` zS-Mr2(sn0`fc;#&3}|Rw9Szi{9y=PsxiCa=19~QKN7UOdXV=CYE4f{e}i&yt9lFLKe zQOh&mzz=5Fgvp!x*VeUD!nr%E_~0(MrVOI$N93aL4$(8GS}@Ro8;fZ9Ame4!B#N8;s>9>{-V-W}r=n>JJNH|sC@e?BI z*Pk33%^i9gYA@06>m)v*|InacvkoG7+!W+>c))`tN21Yl#L|U7ENh=8D_7^uUf#4X zlT%QORmJxs_h2+fuqILbJr}-A>rxv^49@_|wy{QN>6TT@ob?P*HLJgKk(cB3f zoZGD=C!dE)1aIEB(2;DK^2NE5NaJoSlZ;A zT+7F=SQ9qpDQvE*f>e^ns!)~$bmSe3ESAFkjd#-hJE7f;86n!swO{`iTAqk1;ps&C ztp>G4trX?-*inE0Ob$dtzSs;OGBn#; zbEB_=M!dyNqM<50J`RC)$l2xjS8f>lHCHUADPrv4F;{)lgvzw5@v^$rcpIPfS!K$Z;XY#03mZhz z`IGihsZ5G!i(D{sF^L~O1;W-}#9mo8fcHx7lhj;sSp6Ew_kM#MS2HW*31d^r6^5Ty zN25Ezg>f{{9+6{&9z8Oa3&a-P2r3h+vLvUyRi9vlgD!nhp?D`tQ6qY{rF6j={-|iW z3jL&OX_gw8m_f=Lo*eHE<3oE`na3P=;AUQB2uE5dk%e)hxPf-rNT9{Cgu6kk3&9oZ zRuvSdN=O0c7qa-CdOqgzd`lrzw7<_#kknWiN3Z8H@-h zS`lsItf^~82qs5E`*_=~L^v}?3Ki#L)QBz=ooK==kMx;s>}*`Lyau&txDk2nzigPz zS$i~sBV4X@IawsSdj+5MHS+r4^pIbPUBuMG*Zs`w-;L@My%?l%3|}&2PIx zG;;!3C~YZI<)imy61r{j)#=D=ii%55Q}JlX1q#qexOTWB#F zn=lMlzsFIWOtwPuak1g}Q13c%gm;f*IT6A!83qyc{J2AVv_`br4!Ow;IB}(&`6eIN zn&)HlR{I}c_a1yr90d*Um)%Etq^RI5hCrM5rM!`1$8!H<|j_;jr7o2o3_D4xL}vQfua9ViBMWJ_~U{KN@L+vVCDS^ylrwsg^BBohRdTl+*m9gw0)& z^bf`Un}!NBQnHt@@nJT>{sH1oohQrsvV;+L(#JjouFs}*C|MG_)hs$8g-ONa9CgN~ zF0f@(yT1FgAG-@+z)qd0on61-{;HDU>HSI!d`nU!=FIW>COdamzI?RxX9j_^#nA87 z^6H?25qiSZ;bJr+?iAFCFyBA}s-;pU-@U1!kF1T2;!PrwuP#Pt?*4GZ7a}ji)xC14 zfrLhM{DuP};lw*wyAbkJ2Dp)p4(2SH4vsmk#w2q(GZJ!ARo4Ihms$zhKZx?_+Fp3z1R|6-Gc(xsJ4nuCMTZPi?01$mw& zJ%>8B6P&OM+egZ-NAnGce zwVWwj& zmgROJZU|!JW!U?1r1b|0n>$swdAfo9Qb>jtLzpN0C1#hno+g3OU#<3k#uOMw=9@Xk zG2G=kRBkO*A2}WL23Yrq%nX0e`yh8)g@k2|9s4#}5GvabgtHGDFp}UVySYWkQx8D? znM7!%>=i2)y$d(kNKhbw-5BVQp`_Sv$YsD+-0Rvhe8d$?SAvQFoa-Pk$mhG=$ly>7 zs?<`2nb|{R_BI@SY@^4x2-Q5>R+!DhT3Lj!^VYq+rG=UyRYH2*8387W-5T3;svN!~q=1bSLgIk&u8qL#3*AEIo8 zv!OJyx5WktON9`)Hlp5JXF}plBmjlzz5eJ|Cm_)Csg&k z>yve)C$YpU8i(Kg;i_3_NTP{LyR7yWxE(#VPL)1Yd_hF19U2L#Ps#I>31oUzomyPM z`XVf70q%s=pM!mGRG`DS`S@`V9SvK!Uk&uM111NQuC-H!I0TsKY<|+oNawKDU#B2; zjNPN`FgNt_rGL2loDr&7qhUG=b}-{%?72P4T(x+ssXId3gIh90xrFBIlJNXFs_Oqr z^8YgnfIb`cVUjp{Ut@YqoRjGJ#|=8083#|KC9nbI+)nf@3pLk|){#xR77Xf44bw0E zp%M(b{63b3k~m7p**o3F`x@Y_4oKA6UXX_NtIkX-4`u7kdPAZL8j_|fkp4aE-ZX>i zJulx5M|dj(v{y^$&__tq;#wQ=_Kt{N{@@RAH{d{T&Bk#-77FEZoF3aMjk}n#infT@ zsZq9!l6#x7KAzprjj0V1m0s)`xG&Azj;33=@V4ce4#tZYVvz!0|JjU&e=G|IYU^8~~nS9UvvVz@GnmNl&LsE z)P*vSPI)z;xNIsf&&Jf5yFJ-en8JT;l$0eAs7n6gr^-AU%Szovue|Q04%W{zceYu! 
zW*pY$zDjtDzLc=t+g9nFW>(jt^7ezd?VcuEXPFJuAFOUVY}$NN z!Z+-p>P~kAC9X`-09-xS* zq~NANZQzctfv}Q5=;Wn+i`b3V+u^QzHfn(vMK_B;mxtcQi~Y}ZT3yfLQ4Q&fv{+9t z^w_zjDjAn;;utBT8r{_~RmCjCPLP=*%+E0R8lS>a>S+0-qq_?>R(E(C%i*eA!Eepo ztqXiiL*g4V>EBCOU|Awmz&+=3JRjI1>!D08J?1X%Ty}7sE((d_PlWgxj1jdR$ivCR zN#Uz;i#WrtRO1GDKC#aEI_T3Y7-p^`KreEow|u)q5s0jRHOo>lGqz=P1A#+aB_o+T zjo_T1WXM|3Vk4M#J<974;^P8!FwtOu1?Oza(9$TIU-199T0PI2IKmWPh(6nYwMjEI zH#V;J>2_nwCSfcrpB644%V%hpc`a71z(QMJKf7P*Z|cE)5&Rt=FNisOP|8o~oGLWC zmc^%T$>TV`50hwM7HD~3pFgio>t~y47Aa#t^~B3D}k<+UVn})oXhE7 zXCtJ>M4j}Dc#HrJ6%w5m_eV{w@0t@lT|8ugR(@584v=w-VWfqHz-2W*)xElVlkgH zMk?QgtZeXtA*cj5@PmkAW?Ql^Gga4)OKeVibQH>>R6?$pcaB6?IGEg6&4Pi#AH7UX z8}P~at0h#ViEG;_K|OL^J0f5$K@Sv5=1O%#f{h#EI^eceVnm zw;Y~KS9ENj)1oJtLj;PNlkPFbH3qo%#H8S*U}(ZVOb_ZxO>uR1HM3yLU6;=*V>YVg zKJ5sTL!FA9o<4`q6$QC+E17>SLhnJubO3Z`7i;=tHM=x04a}rZB7q#X((n1!}!KccHr*e zqMl#!#m%p4rt~qfD>HEn>f>z`_zjGmhVIw2NXyuWFJVufctNSnsa^~(UH|ETePR1IWE*k1Ap?W3aCp<359r5o9hT7)syggpJdZ zIOH1XLc+L@)Pxlb6}Zeb7B?U2i6HA)bT_M_fxY!?Pm~2rrKE^U>#;0oCz;%*xqK(8 zdY3j)8}PEBg#NlVrxOs|?Exce&#la8D0UWLulM+^UDQ|PDg5=kD8qxlMfglJgy7P- zu*20kg7oygj%F!(&|jQQSJ;!-I;XP7Fn<^(^a~3#Ffj^Uz6hs8CZjE9%EG4$_Jmo9I!Iw7F4k7;6=0i_myB(N)zwv7xvmYkL>-Y3c^(cmk?5{ zxGxSIEs~P)9Eo)0_9zc_yXBKSWFBwC|7KI~Rfr0G|Fv7ZvcYv-qDq>V2WQ=Dkrav! zZ&}GAyaq=zYfUb{U5;90V>kFa++HAi&%|2|?rw~1h+%atRYE_pV0*ZDJWJiDZdcP& z9r46J`&_bl4I3KkM;et}A-5mC)_H}6{H+Loy@m-NELk77!@cc4?eb*r-U;oWbwojd zE?3};kn7UXT^T(+^@ZS%dNXfh1XV{2x@P!4MBBP}Q3HvL#69KCVt+-sF2HC?zOeG?U-S!AC{_P;$xae9d&u5}DAArdKA(+V5Q z>pvbwiw^D{od-(DLEbInA@`gt%gitmse($5$`^^zB@vTwAP}S#Ug(7A8iHQG{*c$32E&Nq=#AC#lkc69i^?E+#CA7bIn2AsX`Fc4>U*+|A>(_ zaK(|$mkMf-`&#*1;?~K<6{fFw2qHk+cjnJpc&SK=@K zgS?&iD_lO@F^DuR^yy|`U+cv1d7k-)Tm4d^A0GW(Lw)&lwOG;H#-(;vK56E>YWiG?F7|4r_4sV%6)jh1+l z(J`}+v^gTJI}d+K{7`us2t|$%5!YrX(HTFv{HjHJUC@AMG0HB9A({d?Z=<}0zXZK3Y8cghwO$Mq%cvL4`u3ifQtN?fTP*jw`C8K-VnqliqziaVQyDI@4jRn|>)0X1=XP*wsBmoo1+ z^+)&;3zH$b$e>&7i}|*H8-~9TVrO`BW4`KE>exHn%+37O=>Nx9CRLSBea-)LuH_e! 
zArlX4R`fB|o3o3JHeHAX%+!YWY4QQ05coe!wq3;}MS7M#FE^J!JXfs#(Yk{^qUwLu z@sE#T=1`5A6pyRn8Ty`*A+=(X=0I6zjo<7v2N}EY)R8_Yq+AD z?iV*{eta6G0JVlmH84e9qU2ZYpEBvsT8RJTdLIU*{6i%}P*RDFv!P^1p1}2Zuefv- z3N{r$ml?!-spl<4%t)DWjLM=;f$S&24A1|nxJ#D`o6RPG9uSP%OcjFFx+wP=v8+g35)wOBkf24`e5f_(nOIi|@DoghB>4c(nd)IH*(YkTxB z?JLV4G=IY=)Z5YFWG1rlyd12o-I$@ZziRFh>$)x4CPn{75Q&BB{lV-6CD1bT#+F@m zj3#f~L28$E(e=_TpVpuDI3zvUS~gt{`1LXOAicM7`zgxv8@`)_lb256j<3Nwc6D09 z@xK0L=6+ zKzb=F?=2s*vJSCVPl3axNp$(n0WTt7oD-{Z(hkh} z@4Co7$We-3IWF?h)U~sKYEdO-^nxBzORlYTIm6Pl;mw&Vn9uDU9D}8wO|MX4KDo>r zrFF!^yx=GXERqH+e3lQwosvJ2K8}6Mt1C|({E-8xMEPywK#QE4bts!~MeTuiAUx%< zY{qwGX5i^U%qREZ`ukY(6CB6qkri=O*s-G^?~b&PXJq`PC>fG=}Z1 z5J&&xMY}a(bg3j6Pi2C^i#meUHV&a>4APDX9bY}#(HR0`A2Qcxs!r-UMd?HwNc#Wo zYOuXIGkoNVmB(UDX>7|`|At9*32M(7Gm+Y$K>s76sTJYD_-peJK|KpByjDQm{i&?N zug&&bHkQEfQ#nc)T={=AGyZ?CfXMON{zkL}b(H+m9%t%;)U1SpwX>v*ExLm+jjr!> z|Cj8Ht?$Z47kvtc7RILd?yYmZKfS~#R3h#aM_kj;PF)NA-}=xsDlzHfEdO>mOQHss z&3N_5v5lZG>#v1{&BNs}HRBlMOqKs(WuaZg_g@$(^o->D&%6wXBn1;z5D=0D0s$a; z_CHM0oB!P;y#|B;8Dju|L`afk2EqXc|DX4}d;TnFklymiK!gAwS|AWak5WVdpnd?D z5E1c!xTK79q#)c3BNzzGfDj4-LY#wB#{H`4V0H_H7 z*c5o|aL6!0BL$rcVIKLl_-1AtNB%q;+npWvAD0PF&AgeL&R z8~|+&Kz|FM9|tT0N`N?_K%8hGT{e&{2Z&Mv{1^=4c9AhyK7yHIEdap%AZkJJi0p;* z1429yAVv+7LJLfzM-8XPjiF}{KzO5rqr?0NB*%)Shb;x-%>X%ngTvC|)Pi&M(d)-E zGEPAd{$@mA!YhPeZv|5Jr0WPmJ{b*!1oBt@(a^_qh4x`PJh7{B&~c|H4SgI?N@-k}1D2y^NC2i>Lep zBSmxcebw#eHT-_BPv#yNLrR+EJv55O0c4sMA2*b634kcJ60P384hOMfABCENmD{vk zvOy;3v4hx_*00Q12MeO*Uj9crVPH_=XCv`vk3{BJ+2rtED45YNiLaNE$j_b~%14in zod>pwCQC6^`MK8~oK1Jm(gOBXE@klUr?J_{2hry-hgvr$;djZKlr4k#!)}hI-Dc1Q zdB{4jd?0t!q+Lm7s~%5SV~BBly7ik25y0D=w=`W{7+rD5vnBFtrP?+#i!|{@h@>HX z6H9kyondR!H0&BNmX`MIB#o-{O5~k*{FSa|#^z$om|tM~dU464{bJ=?btD*zo$4B) ze1vI$YMdg=X1(odgt|v#T#A|11Eqk~QpG;n+7fZpHTCjoagAb!TbI(D%!y9=s&Sx0 zqxrMsulez2v<@Cn2y^xt|3^0J`$8oh2a=E$U$7bl_C(Wf`XfBI`*DVL9YcCYc;RAY zwI%F4Y|V7MB29Ng$^tOp`-yYHjnh6^FnRp>0oh#h>9WftA9XWYl6_h{bb+K98>-}y zKwnWf)B1-+S^=}VbfU}B01Zm7>bm5C?$YHL{LEcqb}Y8Fb=PwCDb531^P^a3-rG%y z3z%262wbH>%2c>Z&#lU4pTM@3l6-W7JHW3yLdUXT_lmh|JJ6yVzB$GpPj!d)i?WW& zhrTX(DL`pe*SPH|Zb;qY%v122@mYBwGGpG=@ zR23JqP+a#4Ni7D69R;tH_v&H!(rk5O=CH5^LskB;_$K9HW(CaU!&(~o-ixyEMwS0h zzWvrQKLnp$y-6ng8V_p;WhP6B(QKO~<#<|I6LaI6f}EM@Gu}T|^i3VIs8L!!N_~}i zJQ_qpwGA4z(ww31^;-Ir32O358H=ZU6<(QazZEZF?5?Ux$Y)Eay_prb=0^&i@o0*) z+b6I0?e4jnl{-D>SqUay=#?ehLnOO)cp*icz5aM@`sSEuHl9ngIj3selx>Cl)Ks6d zol9GP%5THU^y6H+T)n^K7F*eiz%xyG?B!huPO8m(gRbkmuzVg=1l_Sx5Hm*rCvuQ zE61IGn_1qcq$Ati;PIU!H9P5#)bA+x1u@T7JLiLdl5JmJhWx1!s0njcEyLGU-pf+w zVJ%0?6doM7>!hc|=Bt9A(gT)kLjwsExG4ErI#t2ruc#0Z?izTI9Cz1&GMEd+h&{#m zB)gOtA0!((z(^@8FPL^ERXy0{9A8q;#n=ZXi}Q~5l2rtLL5Rm$JaM z&-mzyM`dnV5)26vL?t0oQ{U>+@UDFQ*|PFPz0m7lj1&ct7_OHD0b&t&!`sNqc0-gY zZgIe^hkT(Tnb>_$sSCdf$wbF{{Lm;V`arI%sWqT7F3FmCUWRs}#9|Wb`qHOecq@5a zl#DOgoB-zf^78RJq!2>*h5Ot!;E_F_0wQ7$(*4kFxrp9-U>C>{@w!C$8f+z9S_a|z zi#0vhWTq(26jyc_2_@tr%k{&R&2U+GfVTGaTrnWoY52z2$mwDQciYQ$gh#L(?g)qV z`E-;(u~@BSSxWIIF^TCO)Z*_fP8Z}L4`KuBzWct&ujxx*=veEw5DVOUt|#;)Nu}x= zuZ(2BQgp3-9ZoPvhDZ?11X!>Y?vzZshJe^Ru;EL0iE% zuv6`xK!BBoX+S}!jh=L2uYKCo(_(h!RBds~d9&OlR7~?*WNrZyR`CVZt)9o6smRx* ztdI-{lp#mmpDU;yD#dOlH|8J4L#SIhsehCBO!feY@AnylYJf+bdb_ zqFv5SGl4qlm6ev3Zg?}d%T&)Rj22i4a}74*m~)oFDd(MLbH6Ej3xmb@FGZU1=?(M! 
zkk2iwt57$y_G5L+Fuv^gleitJ>e`#i`q(5wJuXF@b+S%>>})O~Y)H<|9!|Dbplyq} z$|_3ybKj##8ukU;3$B(p3?do$Q(zOrzZA_(X#$2IL^BRe(;dlgq5}r??q`s)JD3;` zw52|e#P@u$qur?EIi`tJAaFi5x;2pgJSPLB0;|WATgPjU@d6@5$Vw+-9jTiWJbnjEecX zWKk?}H0Kv2;oyvUkkehsJ>WRg1a)b2>LmJSbK5ssutRtYg5RSGyYQN=KnvH@q!9a1 z&ga|Yk7m6UUi+AZ(;dkpWUfeVX$E?|5OBMW2>TrZt3QN$FoS~%FZxeSpAxig(Ft-% zroXeo+nB{lyW-oYVtm%*9%6mcm!6Q2T);X*lqaUX%Lv?{-s}}-Lh+A#=SrFJ2j~Iu zAH~6Mf%T_u*W3A1vv0ZRwLC~y#ccj9t6M4bEZ;T?$T>wuT&l8N4uj==ycc^pQXa(} zdkTo5>O|BH{UFB}$~pUj?Y4TN)m9R3I64Jj2Olw}ke;N9^!H`IS|e6jb&i-ne%#Q4 z^&o_6@$So`t)5iBJ7C`FoXUa8u+8{*PjcQL!T*Ju%#la<+i*!2fk?yzO$xKdD-6A= zm@yR9#&Ca7&1&ie_G&eCs^>1aj5!>4W?Ctxx+&p;#gZmR5VDV;+5D8$uco)p4Tf!* zDK^Y1x^&Etx1yRxMd+bM3E%cKpf`rwQxvpbHb&W9Jxm%C`=0gjXUY&-Fs&ac@pF0> zC6;Pp1l&Fz~!cImcgwsQ(MISaxxI+>Bskl!)FqZ zVx*8kJPdY)qb5{%OS0v-VmJNvf#2K(YJKtM5H&ib{rB!-VcMXFN)n8U8l3+Lu{Wz- z;GAb#YOy!|zSF(k7oB~-R|HTZsMA^k0KF(~pc@1;9m1B4kn5I^C#)PrgJiRnq(=hA?vFM;5?WFqLKA)=Y>8J240p3T8`38)+}zB+hZBN^U)7Ua`2& z(1GJgF?eCT+bf^SY5gks2bSSzhO^wp=JGw5$F- zwYKa-keUjlXYI3NJG=yCWqA}NQ;WnPz1+7X1wz8UFbIzx?q06Z?2_R8?!yWl_9GUS z<*PRdhPkYUe4+jM%I*41u$IxT+wB<*$*nMF&CDwK;AfwiT^fh*r)>V8noH@eXK`_gnZMt{ z@x7(!`CRtCTZ}c*;QR^?X13hilVNwjI)ve0E{SX^Wzy6ZX^^47+nTr!lvSES)xt-Q zT4oA80dtJFU+IW_wu&Nm_$fIpD{8!ED{M>wC?>=@JBVAL8$>7M_YYQ(t>CNRkPt7xVsIV^ad38sTF6?+YzS>gKk!O$9B^Y` z3?Laqi4ud*(4Ij+HfTf~kR}gEt66aafMSCg^wCkQMJr7$jz7&P4dj&OVb{n5dRA~i zxM@!y48YQIq$VvA1rP(`PH_MyC;&@2OGis4N5_#KY%NbUwGn-gbm|$WX34MB3fSR+ z(S{bY#4bHCf&`q1gEm2BNyX2P&BQ5plIO`JhAmbTDTeLYLY3!eDJI55bP8+*g^vqB zB)9Wy6sKiGg>5NDMO1^GhXYtii*wYnp<-enY7{gI4ecmK1oA+M{vnl85mAYW zVS^}BBO++dIAoA&3C<5vIKeqVX6S!NrN~fP*dVF&uSH4~r}bAVjZ-6fXE{NrXJ9Ev zt9*w_{{o=Mjv%=5UEKUDl`_eJHUlme7$^n~EygsnBj;%3`By46VuF?f`I-NMN}<)% z`1wJ+=|7!P(Ek77l!9K-1r$QqK2`W{TCzgX$J zcnS?ig$#;KswiYq(6Ri7R7ylhMD$lGMMhQxfux{4gRoN2j{b?2LWlf6tS>HDe3Y8*}A@u)-l@_Ujky@QQ5Id-k4wEvIM!zXTn%29{VvsF~!yy5|G>gC~ z&aun4XnMC_JDa3bb^F#S?Gzzj1x$s~f<9=sK`^M)NLmJGKd0=(sF)u>0oB|r{J!z2 zMVMvte*8!p{5mJz)U5QtyUwnnuk!GbueZD8m`nxFQb44ZGTKOVwvu+W>n^9|g&=cc z4SuK@PvhIqipZh|7C9cDVr96>!LmCFHBtzoirIS>ggk2(;V8e){>dL?O;gcVy`gJ+ zaa-iAcgtsfXbC2oQW3C>fvduj7q>QRx82F;BAV{*AE%>j>rBsG<)+2ACaO=IG?cmkCJNgbqMLKH$Dwf$xPfX1Z z_V8R+$4I-5GD$S$+dPBxoA}*6lduAV9{T!F#ni=payhY$<#-oT234lOSF%GEWe%#I zCn*5{oYoFUu_jJK`SNyLee~x#MqFhY{=w$+nq%c3BYNsY)``*?t6px$XNl^4=`Z`) zG%!~RJy3kY@PBxn>lEN=HI};X_>smaocBcsKc1y};2qT;hAXTvmVYfUjx0Q$N!Cdz z3F`cEaX=71q1uWnqVQ!euiUC+y|5wilzKC`7B}2HoibrV-{D?CP1Dk0T#&*6+{sTF@eo7az)20CgW0G$QtKs7Q?$)zDJ#H|5yk z<>iza@^7We!j}SU(?Ss)b{7WbX#8CBX6BhdD_u#PAmsbQ=rAvo>`zujIO+}Lz*{2I)t3^s(O zUg$8xqql_X@dm@RL>8*Hx`!(r?)~CYsm#5544Ldf@#<=#&q;AUTI)Nd8!;~ws&BBY z;!?w=HZ>Yx^QBj8?c_#nmo<2*9XVhTI{21Ffyr5|XFWNFMB|zAq#q)8-Bl+Sy+t5n z2K!{obt!x85&(WYj?1PHUTiS&z9{o2}Xd+K$DCQR0{!3&S7b;2(aypFSn-8ssp|bnq{FU^*9S zQvDYyokm;p9V~p2(O~lSLxx1ikkHadi%VCW-*cK!3^MmD_PAGR$IIYk!@WYieD(y6 zvh2TB`5v}%@zZ4~=xKKzF61_fLbrR)*Kzq*Zuvxivo?DDG)ROe&+NtYKrg};H?nI! 
zu_a_LbB=q(Tldsz0mCd_4HGbyITQipGo=5ewqJq{##mKB@cKPlS5En{<61H$(X(TW zf2N%un_Y+573mHy_6J@14;HA^i!xsw`{O+EC8{h#ZEcvD38txJi!R3MQ!!2^Hfgf2 z_%OLIH5)2Ks5ZR%kE%Vn*LtuM0kYS|szQ^Z#(ko3kitBw86Pm0H!%7Nbb4P{Lr8fR z$w?$JE-IQ0*#K}=p1GA;7mKXW<)4(gU0$sm9<7jx7&WS(?O4yKithKZi#@xTo)~!$ zQ`Xuc?Bw_jE&Cm3GUEm$lbL#a<^e@9AfX$S}2gH*fycI1U2EzS0_ zZR7S>XSo-4nn2-sD5_@!E)+q3e~8|W;&)r<>6g5)_+JF*U%n@)&`JEbqd0QFd(tRB z>^2^SnUBV4#~fWuQ7cba#<(t~MK4@DhQ#X6Sq=WvAxg&o*%-rJidVqeN^t!8l^9(1 z=Nv5^cJZ+Sq*?U^i{lqqd`LwSheC(Lo*zGdZ{&%Z9)`&`kK?1c@Li$>mTs*vD2I9R z0uP0=2)YJwIZ;C*3zxw+twII3tq7c~26qE>tKI+Fgl*&6bqvR1B5Rq-cH zo_w0q@z!qKZTGCRiNe`$x*=~-LMCuZcty(qeE#19+hTcG!KrcdQ0mFh$&5}fD<`C| zXu~8eWxf)Gwi&egLsVE6nnyE^UXn_e?z5lf^e%6j^r-Bw_-1W(6}258XtWe_bq_`j zGulk>A49jFX9^)wlvadkq<0s3=w^G(Ogmb*QPYP+Hl+*&%l_P2#??IP`}?fmS~jLkPVJ1r{+o9D{Pb?-%Ic(GhGcV z&sF^72n4EV_HhUnPb-`6eIsrT*8MXRZUbJQ%f#hSIYdmoyPZ7GyIVeCoKEqqhbzr8#@Ky= z0t&yvLa^`C;>wE)BDLOG;zQj%RvD>NJKnnZ>%xZrk@~5fDMz#nZjQwBd?Ke{dPQZy zClPs-NoFi8KH>7Z)LHKWAHN1LMS_GdQ)F;C_D}@;~DP+{wP|MYGM{!~8 zEL8MvPh2-pmv)}}c#3L?h(ySm_r?KlL{&n#l+eb^ykQ-sJutM+hV17hfN>{p=I!Q? zRYigW>_78bI!JYkj6l^I{g}Pr<^U=E7X||O@kC!jd^2sEw@eH zl5zFSd9Z8YIbf+y#lk8-=*zqqjKimRaR~nxZSNReS+uQ-X2rH`+o{;L?WAH?Y`emW zZQHggwr$&a*?YhJ+B@yOd(QrG-(0J;^<%6#$DCvK{&kEozAhfPp$Sn{d`7#$tiQ?< zI6FfwfE6XE!&ix^S?CaQ{0y!Ej_)}yUfj&3grj!6dNk{vAMD1vF296YNic2@wL+pA zK{OS6DIr%{zSeIN`l!(?zJsWKt9d6TCJ^^i-paHYJ^uEUB%jtH=# zLu+r7gS4wdGM$t}(PcOEiy@UF$3%r3d;W?Wg%UQRT{KJf`A;oULwojLM1WUo4h5?= zf%Wc!73_mH{m$%8KI^$C40-A7@WM&w|HkWk&jKzJ96o|bJ`E=lh2Tok-Oecfm0z@p zZ3Jw4-%%|kt=YG3RXaAl6~afIHxJr7W8U>>gfJPymEz{?omD;2eAk?;sbo*$qeL5s zW-YPVCh%FJ^FoGPl!uVo%`zuKJ%ls4f?#Pghk}~7A-;Aa3U9I^`nB7_TR~v7qtZ){ zIV)kHSTpdOMdIxwpvK(fDQQ_ z6zCaZ%a|J;+f;`S9TFn;WE{=7O}_=!*(=+lt*;;`9C3p6n5FT-*Cb{YgbSqG#YO*Xvpo=^27@x8~MfK#5t95+1)fx7MuFWAi*nnR) zS4z#R&s~G_Y?$_Q;GD6_?($kGqYH?dh0(eFo<4E(ZQH&=Ln7kqB)4UR35S{)Ao^6Zh zULgaD-px};C4Kl6r)B9w4~JpIha#xR5dI)O~5DLk*?&kq*hZjBP`_W^%53{$?#AKZZ) z6EvqEKeu?ku=uZ3o%c_w_A5EDJr^#t<(Gig0f|WN&WmNU>BH=n4FUulAk}x zr)Hhr^xmI5JO^^U*S-w}tT3!0kSRYf|X5VQUCK8J0 za)))xU%|*h{kOz>T7Fr;?0&b%PsDlBX|)w9bH=+VIdDstj=X4?E=TdA1c*Ybp7*#V zfl3{U@{9uyrXAc>e{+3r%=X7T>s1y;I=)<3X?TS_bG^G<&k>p$zt7 zEOf8G`i6i-#x%h~CJYY?uHWq;|3oEWiWDmk$6uShvwO%Iy4zHOkqaAD3l!l%w>{t2sr#R+2B-(Y%fv_ zyP|dXCx%VuS!C3SsE>wR{LclcL-3K`bQlpH}2Qd%#Yt^zHE zLtd`R-*`GMmWnrdZfaYf`R`q_Tqj^-h-t1F8L;jeji%gbNXiW|T1*rp&%Ia*%n{NBIf3dJEt3RN zYw5TQg)M_%2$#)ie!QwZOAY}mMI6R$b%>_7m~KZxb={D1{+K!f@~WS?#cDu?X)Z|7 z#^T~@MlTg~T>?f=Fy*i?Y||7HS&X4E`_;Q+Z}KmzuI{uoWtHD`)gt9+doA=B$<|xI zFJCXem*AsS!Gs!>ELs22ecNP0p#zh?SHO8mK8SgCTJKd739%Lm%(d>#ENu&onQMThMb%j2SOVcAEpMBF6 zLGA$@saJLEgazhDJp75$&l|)n{lX;6x}|5niZHEljV0T!r$!Thxd?g`SuPlI4+xCW z0#V9}$ec!q3P~2+hF|*_y0chK`52BK9hO1H!3hpeIhz(9L9Y7#6a8ZYv&W?bwA`A2 zx7~Dh^%_kvCL8(2tOp~c?2JX?2&iJlu$SZ36~GT3Yu}pq?NZeMgha91I@L@L;`|fx zXQ2VE?|prLxM$~s^iBuJ;e%lF^01Y*qvoy2R*B;-ilm6-@N9OmulGrAT8%TvXUZ1Ax$WD!9JFKHcpt&K z@u~RX&YGVOh;Y;pgafH?e;a-hN2(*$A zhc`@GBlVTM*&A8C!oukuQ@Cw2owMUj54mQKbTuDIx7 z{K(6N;V~vI78ds_;eSvxdF8)Rvr)ZSS|&JGy936kkuesa@>n z%*Ck+^K|b3-G$>~uZF+Yl6|^%UcOg6(`6XF4AuNMs^Te=n zgkP5QP(n4y(R8KU*-lt28r^W}@DABk{t~*yf~tPDdL4A$7}#(K$Vt@crUANcG9J?>j1;)F04}TPAMHz{?~UzIhJUsXd<^g zC2)>6Ix*Ta&Ur?^vm-!j1(6JwpHWSWoLA@3xxU5B&3-Y!@i`aG+KYU95LTGY+BKV5Xs+i)}H;AI)AJ<;Ju(xNRm=byt(4gAXK4 zH)Q$^wE+M%io7v?#7Z_usZtn`5h8t@$MnP(B=JuV=ni0foU|2%%z*BjP28>XemA%5?V?Mpm1vnv#ZjxctY=TWTQqPF zB)~Zfx^f;-S2u2kCyDH>0MQ;E7-YT`1GZ9@NMoG9WJ;fus`YszH~IzIrv2v3G7vZ=x=~3*ouB~_bk|Ze;>R3%<`OWFipRHz5IA@$svH+0?UhHqn)iyYE*@gN+DoKwM{y3vTWJSbX=GQ|Gfa^#mOOma#zo*g_{?IlNj0X9u3{m|MAVy{_?lc z?)Avsl7*~#?Iye8jdcP+Q^El7)uNmw;!SY@`Xe&eP*uTnV79Liy>>(lT)2fQna&tX 
z){~|rWdx67(!rd`Az|K4I}9zFLA`!d`9I2eGNECJN!;=AgNQ9>;NP%5TRY3G@LV%3~$= zU*1z8laCvZwK#RrNi7!b*`b6dkJo_sL)}-1qW*?^QceA(U&w^gZLE;{Up=nEfdRlG zdU1-1a^+8P;p1stb{Zkaw5LyF+u>T#Br7}Fs5DrlD%BY+Br==$J31S}S!6w+#Hj%# zZXV{IMxh`@S)5}s-OF6-(}R@~FiIRv%#U&(O2iBcLcS3acCw8u{<{VT@KDdzrYKdN znJEezxJ^^w_|);PUa&mF<7@MR3A`@bfo?vsJtpN3l6o2;P4LIqT<|JLcLw zv4*Mf3p<_LYMXPmm7s#M3Zh9}mq1Z^t+i<=a@z0OHYvFOL7qlpLxIw1qbhM3uJV!OH*f;DFmT_NLq*v&a zb6iV)pA$^tzJ`&yQi^L)Kr}-i7iujRpumQyo<)-=(ABzjqVe+sfz*3H5ZF}fZ2CZk z+x!})kJjv-VbzS;K8^h}tLWNjvY0h?(?P3CQB0|q|7Xo0in*3%B3RBrVUorlm(1M+ z4Ct{vDR-MmQ}VNbE}g$*>(1jsUQ7_!5CW0?gMRNsu}({pb}jm^fdTE|P1Ff;4&)>G zi~EhbuvV?^Cxe{J03%JRF^XHoHg4Gy&7=fQgGEM>P8XWz6Ov8BLf!LeGmJtv>kz$( zZYS-P=3n&1RY^Vhl^R|ysp)K5TrDCd=NGc}nZJgP(d)`R!1$M&T$|?$EvT|36nn=8 z#`zKO)|Mn>C_vi-OBVMHHelT{doObx8*SpJS6P>9rn?h<^GtT2R82kv7W{r-o-Bx) z(V?d9g(h>BUNO8W05Mr#`oWep$8l)iU(A*@5el;|H_*1mZlBt?IVr13I3pl` zisQNK=NDt&*8id-pX#JsJ+hT)z+asYgYDcJt%%f3f@G<|vY19LF~_-@$H@Fw0Qyi9 z&G65rYtdon_^L?jnJ%0oRg=RAd4$CEFxGIRa!!#pav>R$+v88=rRckU*K_M%vRhLl zPDJ8h)sJ}n%xKee{37A+E>a4gv#!Ght%Uv4RB;Q=3ixb!hBdbBB0kRi-V~NwA@%1; z3UgIH=3ERI5YJ^>6p*|LS{9echOJL39%7Z6{YIJ<012v6EgZ8~#jnj5^La3!Q(_iSc9a{xY+%YRhI?iQmI;+OebQt9V1;W>dw4 zD+w2&bkV3<(o7|tfG21F(L73nR>rsH+!TVe6HK%GIr=^I%XWLSydjwh zIz%#)TJD~K&WG$(9K@i6)-gw(F4K+sGfKz&XhJ~1yC(7Heq`8I%CKM-Df*E+d%PIy z$WNSMOR`A};Ene0O7zYbcqGf_6ZiJ}v$-EXaFIdl+X_%GAwM8)S^zf%<~Qz=kZ;@? z+$zg5ig2thjvOHz!689=jXN*1(2@gsY%wgXE7Tk;3;SA!pdISq%cI-dSg zSwdP5Eb>?L*$pMtY(G@W;PxK)2+f)fb~jTSbs`Wq69%hABonenXxqy9Xr&6`wIz#| zte$Xo)0mE|Nf=0ze^oA{xdNHlR=e$R?ar!Q?1lx%e0NfuU+sl~wc`!H^^1c!)v&h_ z^L!;|2#9U>MGLlpnH~b)woL0XEPh`uDaznZr8_tEhb!;XPY++COefY}x(3hcP}YaO zyk!!M=BOtkirn11SQouzG_pW~W{&0Q@xtdRHB!4`@CAyE5A0vpORyYtUAvL; zJNR!2Xzt8srceSjsIswQ7Av%#epsKFyI38rSNPAlOLG|T-xm&zVHRxgy9ysI>z zLuHx8fGIy+{`}e_Ah# zGIt=X?Qq@h@r^)pSAfRTe$1Bh_6G0E-O(lu_m5kQz_RY`0)t|C_p^}8TW1~0r5|iZ zC}ATWX9XDV!Q2JH$^U0~h5;ec{EM<`%gqeFEGFUs)W&HFPuKNH#WEuDjbaK$fb;`( zx%Z2KR8Z&p>WoY)VWZj@4 zner9K?}vszSBhFjY&85bP+II0qt6#e+NS1Z8qlhfPiYGdGJ>o+Dkh#`Dl@@-#7wHn z09C@C1^JuXTjwX~6Mn;*{X59@Ce|3V+lSV*Bl zx1j-Jjylp{Ge-2XqIFHH?%01yw)-1#XlV(bRMLkHU2UHK?llqe!#5B{8o_*yv_a*P zd?aI~lHTMFfY%tI*VhESZ=J0gMkOSL%+XslIoN$~Zz73L(&Jhgq{vrf9{*!GmdwsF zdIS;tkBksc1cv?f``d^ifBLEC{Ent4R5LT(3=%a-xgg#c^uhSmg>S@#&D%f;162{tCOSC9)>FlB4ij(m25 zpi1@qwa8ZDZ*akeLC*x#r(gM;{lGQxXz0(^DDk*4a)j4YAWvdRG_Sboof2 zlK)&faEf0LYJ@61?`3;$-Y!94eZnbddj@8mnh;cZ_|12kQi*K`vofi8sD}Luwuz5k z(#KD2(g-nn2m`lUAXBn7al>dcsDGE>R3g;SZZo;>ll*wJMg13B=R&Y0(D3=Dgisky zw#yHkzY~A1e#ONp$HmF5At1!05iEtE%;rab@d6UrFLG{Tc?OPIYM_SI%*TdM>WkQ1 z=OujqnS*&@{esxCoE{-6Zt2+P+pH^lm`%+|n_h_y8DPp3Ao)88%f?aOE6sXB9IAYd zc68_6u~v`v)#iuNiwzh)KXRTa#SXlBgJt_a3dan;Tj73XDTRB>H1!djMi}-h=lrV-y2y{5NG@n~6Ze51o~) z{ruMth4e#(_P4~&1y_)GF2<`Rsc!sg%lc|22A2Z%>mw+!mbCKI`N{a3EXFWhja{p9 z#L*P_M(OWF5B&W+rmx>7*l&abio^+ueid74A%_rW!CIc{9%@X=`Y;kiPCxGU858U( zI+wzhh~haEJG(OWLRf1gUyPkD8&WefpW)=o)sjLr4{x5HsOBC1dRw#svrk6cQ(*4N zJh%X4JUwRG62T(EQ!8>?s?SZTz|tvSZDQV5{w9NpKHTX@Jp@3R@p3iu_-0Sp$K#VX zF8j?Bch~Peq~Gf*1lC_VFb^8hba70L3pBztk(IX}_`qyA$vgn9K!I1pj;J%5VF!pd zdk!{2NpL%cL=23g&q7ITo+8Dpi}``f24F z^@rtceRun1A&Up}Pm~ZB&Gg0Z3tRh~Mv-Z6)iNw-*~T0AleDkyaFa{5qF21z^ad(N z0^AIh8}+mIdX5P@QDe&uoMeVW$&e_R&aC_nzsfDZ~8gX4&140rva&BI~-& zobNO!S35x6QE{oQ3BD9klk`shYrTRhDEF94G~xa{C}j>1ajt;5Dxz9WWOMzKtv6+! 
zs44oCH2XFe{^lswr89F?R=pfdg7Y&VbMlAp>`+N0oT(*F`3R zIDB6tclg8JH1k3caAz$~5wn!@>qQB+A3h^#l6qh?YE>@9CeB-mamDvCWuYgOWsdHt zumZapu}G}xC@d9zEGjiwlwl?&gBH4;i=1d$wb@lml}%QMK^zK}>_DWNr0{FoM8uFmy4-0u%5KCe+uZXUi^>?=Bit4L-sl* zQ+8-=zeZ@qaZfAHns#NXbfP^lasHnYe!wE-pCj_oth2)6mYEs2FqTb7v&duj$&hcR>XVnw@o%gg1f)J7Z_BaQu6xR=# zsuXYveGtSOXm_R2*F*nEEIS3Zn+wQN-a`ET3)oEopIZCMQZSEQw5+Dtbzta?%0k)} zzVP>TX=_&^)mn-bG$A7+&1Ma9Fbip2!W>wwNn{X$TkFe?nB~S*1O^2YWV{_xWGrM8 z2pWSzUb#5g`y>S^LJJo&S*MzFG;uguq~@cX- zl<=-f2;EI(b~08!i?{_-+{HQ> zkP%9o2o@B*L!c)(~Nt)PDNKqaQ=GSS9mtC019+QROJ zQjN=sPO8xqts6DyC(hd8?mZMu{k6QXiVnJ_4szbpf`R_g@52ZebvU(d z*xj1(l}yQkU&jhfom<>t5W%Cb;2Vp*x>0jMvNVReqMk0=HT~i`V(^4@$~L$^btOX2 zb^XQnNTEm&2dh9J=0fP}$8onOWO1~@UqF0jMcBD6DNE5?VattZzaZW!-@QW9yr>Af zAo!PD9CT$1ijnf(JDSpq#8O?T;VL_?clbVfp08%1lBZKD~1dCxr%Or9%Ycf!M&$)2l5`HlsABK5yrczOaDD(?z}Amdc6-no$uryPz=_& z*_HFPi5ldCBP<95VJ%9GLl61GiqmuK-NliXw7VP4kPUcT9Qi36ehM|z?DcWk6vfI9 zQCI{T_cgbQmHbsvvYzFvJv+saJk7|pd7I8L!Ui7&@2W;pj0#O(87ZUcaCk1`ZLcq? zKi@aQ7{&^JzfLxPPG#v$xO>~S>92G@CiTG!z zKev#p%U&CzE0zy*yciTds_*CltztB}QC-tT(JSq8t?HT{B;|Dzp18REyt75)d4DWI zWEhZM;bI7LuusL-Ry#zG3>>~(L)hs&Y;Pd!eiX%eaFB$|#h11e2M$C#@(%16#pU7l zKOPDO=U-RUDR2qAuxZ>a|B{1gVQKZLWyJ&IKX_2F8J}$ ze{*bhGoF9?=DohcZG`2wwM*gNS#W#xJtj1%UA4MkiAOiF-qWdPsA&@EHC?=!Vbx2P z%W)X~p`M{?#t@Qq4}L^PNF%QapN4ZJPB47A%Q}aZyv%bpa(i1d`967rOlMp#1?->i z9ZJ*2CNGJxH*9myUDgq$D4yzU!b;zi;bV49EJj%8+&XAU*U<dr`0um$QA*U_Fq#`X0AY)b{G9A#41wiVM*B ze>dm|xM1rk!Tw!gL$i6J_`I}bfvhf`>1kV?Zq_1r+D~5_Hxq?cpP4FO`td7xv<)Z^ z7R$C0nnlXQx_;{K=PbLqLmMHmXC#=yLeW=-nH-*f`KkR7*SkuhM@@oUnArZ;zT2Pj zAbJz7Lk-FGSaWZMOrD$9M1ORQPFuMVJ%tvaJ)7NwuBU)}aeWWBao<&2^e{@X6hOvm zrxba? z%JJu#q8E*YS5zK_+5+QXdnW$;-7GMy#RZ{a@TQ7up|tSR7NrqHqu_1WO0n;(Ki`^v z7hF!L1%c;z2iNZwDB;0?!uA8+W2wan(!eOh5@{MD&spSl-gLy$B64g&-7H=jA_T`gw8civgJyFY2c?_Lf`Aj_xG#(1Ep$}^*Ab%ANBR4vh zhG4b*9H@3*>k6%MhA$YdWdgYdtEx+)xpgEJl~xhFM5I|=Rp@2I4oLpMKdwH|ib8lB zP6_{%i~gmwAaQu`!eIIKi`zFY{!`llzXUIfX=l^cbvqa&_3k>AjhL?_M8w5SehjGDa)d>Zbi(FWSM*Opf)JBZJ8 zf)0lG(L`3%qN+SqUpot58DD_Csm%x=60)(F49h-(oF)g1J%k-*&0`Xbog?G2rGsIA zS=UhSzt_ZnDrw^d*{N^uCx{}9K>T|4$D3ITB96(8PX5G_|3TVhQ)f^kSw_mJOpg@? 
zsU##~OAwosmnuDPZE#jN1plC=?Puuc z^`fAzYYAxB-feX+gR?-7U1twZ7|mXTTg>!dp$){UR<6^bhHWcdZyBEe3ZgyRIKq@; zg^dtED1|W8;lbid<*KH~BclC2A zdHuA*t>Lv#V7{oVE{KZMuU?L{KaG`Jj}gMq<1*gy4(exh$h*vA{{lt9zqd`|zCE-6 zU@yd8!a~*RtkN}9-=LA0%)KK1(%s2BY-iP0chE62H~(={brHTnd#WtTr@3V8uhsMX z?&8;qS+LH|F@HU`fosDbRCVbEquo82NulV#fEqxXp zKalLDTL_}et&I5@CFUpe{fOk+u)pL`-*yYJjyuv=FL%2Z;b}4!0)6dxm}8@eGH}XY z`@Xk*&_mr^c`2U5Go({Y3WXmv!MGbvO|5dl5f>jt2={FgluU7l9KmMU6>>ht>_0x|a2LdpFKI%Iabgr7)k396D!blH`iuYi)(AJFF0cPUYLAXehfK?j z7H`B)PfFu$LVdQl%l-vXe}<}6r0ijzf^Kmn@#wawhvq^i4(xTo1 z4U;Vbo58O{fRiFX(zelwGiKu{d~+CNXl3W1XuR5y5=V6e1yr^J*F3G@r0Njp1AHcC zqe%J(Wds2|5FeR&y>MC#bp=q2f8ZRXZsC7C8}nnS0ygXael3!P34{o@7GZTA z?nmqn!X~bV(b@C8g(I8>Zj4TrW5+Z$?%oCVb*}NoN8Yh??akt6FwU8}Iwju94tsSK z6m?kSuixJ&KZ)w?p^*!F=A_#l-d3>UnLJZPuO8;EUgH!^vpCL(z>9n<^)GM9v;WQv z{kx5Qly}(ZvQ^jN+%A3QDL8vSTyCJQwjyymViRrK@CBEd_ z%mq@yk{60_;dT<%M{xng+1k`FLId>6(_2u^cjnAg+v*)||F9G@$zBUjO8Pknm+NAW zYN0-1f*+@7xQ7KRSX}g^!$VevUsg2{w>}!BF!gF8gkbK-D?skcTSy+)=4PetN7=uS z9?_rk$kr3su@m+;tzy#T{$!R)SIoN?&21RlA+*5&#Jr`p)OO+~SMy8pk1&N;g`$^l z18c#g^$H(npltIIOTn6>5+@P6>6u>#5ltTQr`WU-sg50G{x%q*Rnt!W!fV~v-&5Ih zeaHB)u_=zd{Q!Wa5SGN6F9Yr{s^?Oy6;1xbmDv>J@&0 z!}%o$b&ZA|d$HWioRd?7eez>_=9GA(ym8~a*fH(N#{4S-@r7c~&@Fz(o8@*NE?=00 zpLe?l%N2tzxAS-l*Le}ZK-XQnLzRMOZX^=OxnGni>`Q%e2R;84Z9OB!t{2=D6)C%f z;G1W^n%w!}>c7Z7N)ShQi7DU5N(s~5axEO(?}lQSSD;}Y**G#lSF6_Bp2YmOuU0eo zAwlRy$%lCN&ruSf5V!T+RA$=uSdUhRI(rt=foG6FP9vexQvTr^>-ddja*q$^-=5=I zI%A{ScB_atZ%sz^a#k^zi(BzN2LPS0e=Fb;lh|594%}D$V?s$3TI!gVw;|D3${##L zqDe7n+t&alOOsP%JFzCXo1(o$*HuCw-=Qme6IZ&ujF2H75$?P%#^Ys@-kc+s)iA|7 z0a-4J^v8lwo%tCUs+8Aiy8p>-RW%)IyJSv`sA2JD`g#L5vol6GOYVCeen4i5UkdtH zfW!2~g*QkZzSKgyi6`D;akmOVZljbKrAUTsb|x(X#994R#V603J|GlqmfZD?+s4~r zL0eL3@h_=#6jGmpOS!K+-w!J(i7|e_OG`JBr3UGJre`L@i0Q>HNGL>aptfF)*SjJ9{M< z#Gf3n$Na+`9)yr*AODge;;IOIPC^Pj_2R|}eDU)7yE*Bzp-3gtM|YX|zM3)YlPKUH z$oK7Vmwzf7zJP4uj7-=PHl)f|-k|ahuCcYCzO6>F^_}{vUgcbK4gEf?W@E*uIXQ*^ z32-(4y!4fI)AjsEM6?$^NcGeDTvxXD$Vu0KP#|)%q3Ed2SxFHpVT3WQ4Rpr7Fq-(= z(guedYXC<}BvCn-@FA?kz;sHy=?_4-S(zZ368#B+?FIu|tJEupuij#uuoP<%kx+{RHi*pf3%O^G# z`j8n*L-ff|nU1M%<5f=DC@kOtXWd4-Tg46fV9{#tu)d3u;)4SyR+=h$Kr8!%-uwMO zhc`!~*0NDGb@TPWI2QT=lDa8&iB{eCvCKY{TIr~Ve2l}>2G&2N(`tq!B+KIPqpYU? 
zpk&Co;O9iXFj@{q;s%m}>rVIgE+|y7*=%9{ug?0vVfp{h-;j_CXWDK8yHa{AhG0sp zCwx!lfVhw&3mnxhAwTAzcTE`)w&XI_{O@wq!sDPdbQTs?7iY3wyy7r4sq{FN-P2s3Z~utJ^C33C&D{siCJ6EV7ssNKR|DdsdbIPs>N`lTrUa2w43H!HKO*=l-WS^q<}@_DQTMp)OZ-{y~+w zgaQ`@^?w5TeKDHt9}){yfoF}Su%`;&!uW0@%x=#=k|T)|l4Lsg7t6z>MADrWy4*^^hDn-Q5LZ8Y^+45W;*^& zOHN|w+*3FnL}C!S+g9(%o?Xhqtmu}&t&NJis?KRK*}I!b4nB+JU1vYw8Bxpp_~J>Z zh<7!quL|G;iZ~jxI(~l$vO^!X!nBk{_M!%|#5e0&07@?o12YNVEr6m~;tdH-C8d3t zV;sb4HY<*^g!iA6)47VSmr9pI@0aFDmRPa())ZsK7p|FcC9m&3j3$P*Hk@?f0+ky) zOD%I2u!GdDZ}o~!>rUe4Ej64?ZOs7IQ(V#YS%5hRNmV~n|3Y?)!X(`3bfWmGtveOe zs&o32cF~O5;GYca=fUGe6+W<{;vlv^1-x7Q&VNxH84u@p|2=0`tlQeL)yW}^+}2Xt z);nMq{YZ|RceU1}9T?e=HATt#1?|G^_%r+GhdUskLG`8{fGUuKh(MxFu7=2lIE=ZR zM=Z*dXw`2OH4EsPSHz|ncEpeCyQ3ASjG7O$!9p`pNr$d$=ga^ad_C@{w|5O&{LJkp z&Kh6qSVN*HqD(hiks;JOkqyZ3eGz6?I?k)|rJ%i82^X4G zriv6-nC*q0dBMZXZ4L~ZMdI%EmjZVo&%bj{?=uF0;H(v{VC^=7=(xHQW6Si&-#545 z=hlMU!4BhTD$xJcN5)<`BiJ+V?YcjlMOF`~+HR$oQ{GN^u*2 zx}qwl-oQoA)CAY|Wg8b`$huLHlY4LcDqnfZTZQVm9&AYm(Zu>XE~75|WbFJL_+HBz-?jYAr%PIdE*kL`TQOHrH|NW+qj(n#713uF<)J<~02KhZeK$8}Q4@OMd5}Y{_0P#&gRso=b zqf7Yp57+mx>F!vt- z9&QdKE|YTrID9NPN-8*6E;vFpco}FgKs^GG2$01HMKKIQ#b^eQHh+_Zk|Mo>!heB& zx%z&6@o#Mb0Elb(pdZmANx3UC=}#nuYR2Zrg%gi`sIR}c;*KAHm|<1EDS-w&cg`uM z15OaR(1UuhG#$Kc4mK`p&YNe%gPgY3Zfb<2BV^_{!`L-Ni z8Lqk9~R%VmT|`U1D-C3(9oyws#mf z(lc6yqsij%J3`0k7#_qXA{9eDKY^=ROgn7V1$}``OvgNPr?>m{F7?3uthlwJgl7tQ zHvauG3$j>C2C*oVE)yRJ%ak-9lzP7FM z(4KH*PUEq}$&v^C+6V)@%Nvd%d;Z_e-~WhpHdKW7;KhAY#4Rin_ghFn^Y5-O7zu{Y zs(nm*YRSD)iQhfOyhg&pq7>>!F;{SiLSY(Rdo6Ggvi_YYk8>rB#5Zd|3DDh&b<9g$ z=7_Pyd8QjZ+HlP&VcqLZk{R}PuHOIsk8b|`C2&fp5SmqqMd}njCFy5#0b+uJ324ur zb6dVjD7+Kh68OE#5Rfh-lZWkv!MHj-T*~0jvL6<}z`|M$shsn?wCbeX3moi=tKvU{ zs#g3syl+~9(S*yB=bllUnsacIei+`5J4dh~IS2AhJOyHib_4%`(FkINZck_yutmZ4H47^_kx?wHN^mhIxs`n|`IR>pW{?W}NW^hb7YS6(lN@7ycN-Brgb;a&sm z0KSn#Ed|OW1}n$kM7}7hVC!X?kTbqCJ}kGEyN-LSOt!~&80|B9MOKzcGR-WQgwA1S#pkRUAs7-Mt205fHEYwDp$bwcbBD zq>3NPp*9H`^msPha`7k^$sT}&^3WB7E}tK7_Dnc=r)H}+{OWS9HvC4wzWXqQKLCsZ z7g8#Mh67N?AJ=CtnjC3i&sXvMhJnEu%tTvTNIb-Gvzeh#81zUaM?=;pPJG{J^R=26 z;^tiQ;4*qPAhk#Ux>QDWx(+AoGJf0=Z_e|<>)O|lYI@s=7Bci?ata@d(CB${wA%m0R}lo zW_#K04&Q?)UIx26$*)9)xEZz4iClcw{sB0U+uY^IuIaz#5wreS98L}-U`(T6+Vg8e zceFPcO|#EHTDYc6?Ts2YJ4*C3kOaey82jAKeO@!yfHlZGpKTcro;FKDjg*OaSC|27 zE9|V(X=e}#W87x$S&OTh3(S9Iz3P`)-!MbM%vN0Wy3iC8Qs%WTk>m)(fZFVkiI6gR zysp<_kvMUGd0u4ioDCnhf#CS594ia81PWs6jHTn;_Gb$$BoDEpPZ!DW6_<4fY zSVE3Jk_@T_@zu}^$1;>?c?bcr2f@1Ty?=GOBlm`L6TTZwZe}`&>uAZ!(u!HqHFzCs z@jw_rx&Xzj7v@PF%GXoIVlKV5;6bD^Q+rJ!`UZ)}h8}^%FHN%b62K-`j6xCuQx$Eh z%!vQN`x;RW$7X+TO%C*CAK(m5`Zho!;r20ULIx|Uv?jtX^V|R6AEE=GTPFe>AP@np zQvsF{*|fI1btWXwW?>Y1+pD*`(nO-?h2+s3b(tLrjlq7Qu$yT3KTNO;sNgP|1B!l! z;Q~!rMwFS@f8A$7(opZJSp6sk!`CC7fWa^s7~XiIL=q|2du3+4Py4$<=wzW-e@tAT z@n~~s@V_Hi1A+xnh3Ex{g499-U{=6fK%u@rncx7hN=QpYQJusDy&wL|Ra(HR4t!h` zIEtf!B1_e=v^Te4^8IQUZRK@BjJ$SrofU0~vPu0Ak zbMy5s-)K{O48T?}EJM-Pl- zC5*Uz9!ubdV!d}lf^TT@APkizCQtB0H5dAlF(LFb4pacGZFb6i!%I^oqz}5&NzqouE)fLl*1RJtY@^H1Jfr+($U^bkfFJ$3!JHfZtt8q%sm(ENOo4mG2kU zJ#__3js*Cf%OQv;>E6+GqjziAkcXd5zoDA#E#ghBU@B(Seq@sRBlEh~U9>dYoTU=M zmq48wKqGD1M}B}GtGo(8ut-hHi2U?Zblz0I4<4%?OCZ?l{;~~^DBIBHZEENlg?-}4l6-UrzaC^-d)Qxk}m69^d+v*!f>WY7u%)F2P=yGaK!@Qws>^MwU600ROQ z0rDfmHR>eTtwD6a^uiaDQKy6iN}&ZKv~&dvCY+1l1cgQ>1YstG0Rm&t(-I7V_M_4n zBn6V&Aq3j3>o)}YZTt)m?H2^Z!)OQuWUvo)+~Yu^5UAjj1EZTENMVE-WMsk!$5p!h ze|7dH@KAm4|IfX1XP;p#G4`?VOF~4tmL#M}5>kyqD3K(5Qg`g38l}=cM4=6-RMJ?I zq*AF!%OsUbnf5*Z>HGbCzu(XA^?&`>`?~Hu_l$GyIp=xKbDrls?|Y6$YBD~DpcYr? 
zdxl7=sVK{by0Jd%=`5ceTVG=-*yMm&eR#BK^=g>zHp;fEFHp2&P=H%VqaO#Y+u&FM zN{3-t-ilv9Nccwmq)}Afh*T(`aPS`RO}Gp$$(@r`Uryu-XyltpUPc_{0xuB{Vc>J@joP)` zS}~p)k5Sn>A^_HXWhjav;WhbCH>v=2ZCOyZlMc#t8{nt!)(#0ttHsOu1V%^}1Dx_A zOP(eooKykC{?vk>wNzqN3=Miyl%deY9N@z|FpFsgT-b~Vw0%i1Z3+c8NtqCdsDbWD zbLe|M2QXbAa|}Qs0Z?uPT})ijlcJNI@I&;1)-;mBFjeiA|GLA+?I(+S6@C~m>=r9~ z;({j2H39k6p}9idY1+-Cc=~hcE#+3B&wdp?Zt~JZqEbxf6Fc=KLd0t^wOF$$B5$>X z$%9K$K9hHh4Q+}*x4*#EWDs`@DuGi;troLCgL^S0jHXv3%JWeG3sM=vfc&`ZDlwbcKjP3ezlIoDsGTv~- zHQoNUBU9~i7f+I6bWMR~jmNsaau?LsQJ`UX&RDLrN0?wH3Ri z-62$!lzR%Zs{G5@YEO_1F*Se+yYW_?vMU>gY^l;Km!Nu$C3qgE6PNu_>F>=T*+2&AZ*_pz4Zu%xfQYFI zz{$sm<^V+WW)X0iTF@LsCBnOD(CnZL8g*8{=OsdyPc0;Ery>0Q4NynXg)f%@9_qaE~36k?22!ZXxQsk4V%C z)+$VbR^D6*Mj$j?U{8=TTGzbSts{&1rT36IP;NmBf5ccXQm2{WnUeZ$CaNM%fKB(0 zgr6&~6>bPrOMCC9uHbDnv8K4w@$$E4k**-3&V$n&@r%Q+?06}N0&yfV)+K@Q{M?Ht zn%ggJytpFCqcCX}(C*@gU_$xBV*LozH?f9BmIRfgjMgPKb52P3p7ik|kQAj3}#= zTJ>SOYKLlH|Gu}oG0r-KaF=`{H~IU({wx(JH}K}6yr;d0fk!O8pOiA{SW5H8OtP5M zH|1`{KGxe>*T`try#!508i1vNR48dj>Z?T<3{iOUM_M3C%zA5gYu%0;t{IAO zFO~-}O4A`_s08?!)AD*GtzB`s@IEPn;|mUgolu5ORq3To3Fj$mmTRbCxoD(^Leue5 zF)u({+~xr&a0qj4jHjxMgi`(B)))|LQ@pKz(qUf#4OeBMNmp>R@O$Y^og`Ce(^RK_KdTjRXN+>lOhes{;o&@~Z+4_2 z5gLvXF%Y^Rh^dmvGY!DXH;crkXuUgPb%i7#^Ru=1UHA@?3%)6RmOxD`gy-R1^6h6U zh)No_-qM;wh}RT?dC)0p5_tUZiN(B~dv!&W-GM}rAE^8Og@Bb{-mf?$0*1(dcNl%b zl^6j8nYM?r&-u9G`uZ@l=BLM(I9~~WC)#noLO7*kA%POh1e5n$f&R{K#WO-Ri{5Q* z58QjOUh$$Vz{_-n$|Z$69lfAJ_~Sajp9-(3kVH8M78p+lNy|i?Jvex(FcR@mEEq6I zV4lHm$6tNR0ir4mOnAu01`QuE>B;j zud*TU^?~qrI=GEZeaCngreY6T7y`+pB!Dg85GE0zQ(HmCf=~f0x(0 zrFoW$R+GJh8KE3Zb>RVLR_)64h3uqWWyVx+u`wzy(4uu3ei@>ei8Q_1DWG55tVpXU z40(tU1$aERQehg6(m_2EWLDvooKoWw6h3nZi?X}85r-!fqMVHrdTYo;*Ia=27s!&8 z1R^SnfvD|TV4F#WJR=&6H7P;e923x98VX6B%^-tT1okWY)Jx+2l_NC;p4k#jCE{rh2ok`CNtLQgkg^3p7+fYLk)Jbq zCH_^)c8F^wY~^VX3NqY$J=p~DAhQ;i8CEf!K?O%j^<#1|`fr%&F`!}gL)FRrGs8+k z;n_Lqt6#@HfPpWf+{)12K`cKMEUB#skSCu*xZo_Q$~k@27jYUD| zX{zYyP2fymh0zGcXT-M?u$3s0E7zkaZB61(zAw5p_gc1TyNU?+wwsBn((Fi&mat+0707pUO|{-PbbvX>Or$524{5Xc!Lpe7SMT?jNUzT$yEmL z$WHlqX9`7%a;D_kc>G4>27`Rv^LPts;dKUqadLZe52<_kn@hv-f^aQRJq1==#K#+Q zCK6UOU`+t?TWs0%D|fx@Kv11OUARuF%)HoIm?l{*O$=m_Y(f0ymU4gscRD=j#_4~Z z5laVTZd^0iORkqh;Bw8JIvepN5!f<>6-gtCL)T;I-PUN}Nt)qnK?EXFCQz~I4ongj zOT(idk?eGyLC#EjxDe08+uG1x*W(#UQOD(Fhmo*9S2%?9o)@6J$X^(KB9%edWln(G z12$lxvK00{yO|*=IEl#rYdCU^56H%8z+ImRz)31lDWday{sPPr67m7=dPA>+7HH3+ z6IL2tgoY}1Ih5y7!b9?i&)0J@k-f6h%wiKFwJ9F@Q4Kp%9Tl6PGnAs`)+JmIGsLIJ z>kKEu2`NjW!y<_!b&b-QDW@-QF_VzMgOK=6o-qebC_oxZmPq81%G6;K+Zjc$X-L#d zIF1wcz!Q7?MLUl05vdx&O(BDT2K6+czGtR6qbm-#aaf)&H5Q~8(`}!ei+L?(8N(JkH-W{p4TVGkMO8mqo z5;k9R*OgHyf58T?872@1yV0871(XtZz-g*Rf}--d&tKZa5#ACF4yIsb#_`Rl6<#*jSYgv6+$dRuhiH>?vS=T~WqvE?B1e&4BozltFrdOjDY;oD z9+u6mmH}i7M4i}$2MF}?E?V-e0&! z>orTu+bA&%rr5|6?v_LdX7Lp@WxTk>7Muyy%tj9zf~4HGE1+r*;H=mHDS3w$q+^5z zS!|ODJ6gTKn?{H1iGoi`fUubOZb1~!23MD5uY(yhGaL4Pe99tIzJ;%nwr|#}*|)^O z#4;*9rw9S2dt-<20EA9Mc~9)DJ!Cs}bP=pHncQ}2(uUP%L7+wnz|oTdGHF}M+QDA> zD$q+WlhHmhfyF>xMU@*HeWmj}i5)lv`5H(e1OS=@!bxL7g9z&MZ!6P8wxGH$HZGnG z4i;XhY0q5+B~>%K%GZv=9&S=nN&v|SiBjz|M|&hFM!$4Q0R*>uUBR2E7EKbGJfwR= zz6#;%p$ueC9_Gc*=FFy1Xd+(q8URr})owYR_kbolLYHdO>jD_~Lb_O?lV^nIJx7pM z!sAk$5?&e9x{HMD$;*~#z69yeDlNiDNsET<%S07;(*U>orThWa zpHi7kmVgoH!UO@9$|tKaP!ErKsV7D9!){rufnOjcYJ&ckR<%(H)p=5UpIqxJnAHyA z%dJ2(*n-zOri7XkD|*X6NcTMIkxDj6LQU;*%5Xa~E(J}|W&uH!&4(_^12pS$z5_2b z!!SoFxB+3dxzVn&b;{P~4j@KeN(64&r$J-Q2pG2Gxw$8K6mTfAslbrXjuM&}rR>+xBZxMm$s#%3t=v{6Zh8^KqA5y}?sE%I8gD413X zO$gADcOLA}lo{)FRe!(|ZMzB@(MDaL3J{MHHpqmy5t(M7hFOKu`n3p3Cb! 
zjZVU`O2mQi6jO_{B0x0>$1FH}z~xG^%!7v{-6u(9?coxV6Aos&bXef6^bhSceyCXu zfPIP+TfHfNjGR}#1#@d3qQQQ~*MkjYjzmyobLiNmR(+MOFH04Z#YnX#T|{EmVH-sZ zG8)MT^AMJWPcWhh2tmq`VF|q?jqjxbFJ_vAX=W+p3 zNAee}C*~2%*PD-MzqOQ{1=Dje5>qx2jsYGAO);RX%HmK0NNk!SA9d(Jg$rWUd9&Sl z3rb!q?Q{hh9p#)x<%^7hwNFHa2kd-I0~)kexj0zhOMtf=Sd>vgJYnVQ30*Hx+?6ur zGFkh4tx%hsvQ4TSyGjy$xin-#DJ=$5%oenI^KgN}CWeR$n?V~(q66=ZuEYUd;n{wl zEprzzYxQ|vNqeDqy2Mzji8z!J4>`nQQdRf+6${O$36hNt}gdvea zX+k}-2P@>U&LoE-OWdLEI&k6I{!3qiFQ9#X}XeztG%Ck zSNILlN+bP5zrrj~Dq!@HpJjVtYT(~XyPPD~Noh0|m)Ca+n^Y+bCNu?P37NBBMaSjq zK|^8Gn^ui-!@a~Cnj~I9a1DzcK=8=|17WWw;3UEmyDL!IikR1h(vEVuJ&}=l%C~@t zFFROvs8aiRZeW+-4(Ur$lH}K9)LO(;R1+)r(z}$s719XHk2A3qSeTya);x-G-AMiq zh5q}q_4xzoQAm({%;Sv^>PRpaHR8AguOzb0Q3aNj6B11Uh`mNhMEmu<{pP7{(`yd}#nLw71qT;&v5{klHM)s(6KhlVPo@q(RMM`EShaw+$Y{!8P@bB>J#}I* zv3k5Tj7r*1BHdU5XI;q^;*ul&!O`kw_mJ+|qhG$s_p zL@wQ1S>sYUXr+0iK$X3nv%_T3p~jWf8y>&BMNl(2JRwzFpJ8 zN<&xG3h}2TiyLa5CEoUjG7oCiG#dQ7=JNW_5i~xwKDyCCY+mD}w2Ps-=O`-W-D?!% z!p6xa0m-ZQ>we75;+^^x745klU0!(SQ}W;1?&r^}>i*odQ!gg0yL-EgSNmWr`<$NT zPh(CKzv3(N`Ll8H_mBdt;@-B|_kZoy4mh|_(@LAI9o#W~|J(gtgFHAsYs-d1zWqs^ z-amI^9XpeM$K@Z$_;?BTEIAW28r7}3aW__zUy*3;t1>wI*o^&;I2mQ0b5&OVtpxv~ z;eXMyK4vU)dBDOGInNjHcheM1=ZbpcDkr?Q3|m6Aw%yApwnI-rBTb*>*H@Nmz>}#-BOpS{1z+81NY4^YUr1T|RvP7fTY;HhZ!2SlzSEpf< z$Mntj?hABlo?SX~Hv9aoCC7IrOD-^ipn*W~rXVyG@gA3^l7x`=5DSuH<-(?bmMe$V-(1(|LO)3-=x2tp^xPF)Rsu!?Ru`T6v`Orsn$W7lnN{9Vl_j^`;UjuQ@P zB+M_r5Ixa5QmnOkz4P5AsoCl?inV*LeoIT#o|}*=+jlE?P2OquF|9cdiXK}vgK;J&S%gM*owpY1(Ya{r*=K_jk)Jm9&r4=qRDjemhK<7k8kq0 z;x@0)YzsZ$$AS2x`KdN6oU$};4b?@Rt!(<^gM2^8Q-%A#FVD)^*p~EnDo7qYR(y&^;#9# zZ*B2{6M3HDW}UouF)MqT?$Hw;12p4LumzEwQ+Khw9mbUAmZ>$$+xJ;oRs3Fi{@^IK z`g`T_(S-@tdedwYPX3&zTXCoA&8(>HPj+ArhQ@Ab?VD0xR2%U^__Ff%-Z{^NXH=KF zZ(IJbPJMytB}=7SDo@Xk58N44mszWq==9n54Fw;rbvhT>8F}`>q+3_Do?2fGiAmnj zxIMp_cITgHq1FG6g^O==lveW(hikmqep=;eU`v$+A4f=XZ@H|qbE@Ij75Rb*`cZ2>eeg5yl z##bg0t4Ff-XV#>i*=#b9tUj==ba!jA$1MZmT6xEnJ3Qv%L*9=g3z{}|Y35W~EAYph z48p#*uN?kTu+U+rjaFaViNN|47_l=L)EKfzJaTfRd39xm^Su?*?r(7jetR-G`)cFi zkv+=oUZZMRe--X*$ojai*kecb!3ejbuC_r3SDQEpG&cksS(P4DHu=R<>mJ$p;0|)n zd)ISEykEVao!{W@Qm;z$c=ysJCbmbnCCBhpP@VrWySDAR6oa7;?Q`+4h$CtD(u3b| zjW!{;qoBh4Jas7_0oZ=fiD#=~FLk`#jZcYsl1O zxOLgtA?pBZXKZ~-->#~py3(f(M-N|HoqO@zGVL|n7j{hVOm#VN(_-tvjGgE#NfA1! 
zpvh%sP|F=Qweth?+vdGKla+X9^O|@C&-*R!_PZFjc7EUXa`XB_!%NjqFPObPZf)+H z)Wx$JJjYzlg&y5q<2P4E$Gm25 z_gS})qqY~FadweL@$rh;TBTZ-cVFJGzqaAsH~Jmf1llPR`(**U45XXoRBvMzucY1YY6+^qgI-*%5b6E5#N%nG2d;H9e5raNSneN)#Uq+Nwcr)nl>!i z?NvCV&RJ$xym4!Yo#%->NRc3M+5$W)G~o>O()Z~b>sp3d6>k)tS>mhnS+dDmN4>sN z^G3Usg2(FAlhl z%eRq~>@=4^)dkh(%G;KBXnD81I4g{rwD)=YMeRrG7AGEj`JK@gpLOWq!_K$E3BAL* zmgpw-EUWch>6T}{w3S!Z-xn2)$Gxa2PKsGf*q^-lBInljuKX8{vT5IB?mv^(cH8Cc z+2P79H0u^VIrekGtoNP5tI7);3OhdhdS0Zg-Qt#$eOK$uc+{3lN>}$R+wq*3m9zG) zG`h;V`T2ww{>o1iKA*i?eamqFi|&ytHx~so_B?G_)ns^+I{32U=N96-u}y5K%J}Z} zq^Byo$ellL-};t5YI*tht%Kt6S7%E@1{VZPZ7i+*(m#n~7I}Uvsiy7SGU zTLb2khWGS3EbrHSGJ3DPE#&v3AFOewpEr|(y*=9!pRZ4CHId%@X!d2}u`dZ6&E-cc zt&dE2R`zrFY0G#RnZuZufA!}}*YEEJe||*VK9L`MW?FqJwPd6f#xj3(U(>p&RWH{Z z>vuUHTytgnqoU+IifHzuS<>JV%cRV#mW0cNWtOkPtu8+BBkNeH_ANCky5B_GqrXa_ zX!+N5?@iiEtMpuxwET{jVh@^*rw)yL`Iw;J$BUr1hb?_j+&y+)ZOz1G`HjB$d0WZ* zMu#dJ+>eJe&rK?_+w$(g-t|xH&M!JvRXd;O*dY6!^0D;fhn%~M1J)I-ZeEMM+I(_D z&+G%;E8c`Ha(Lr%iS{8pEgU^h^o=JK3L60l#cipP`cZdALN4xcM4_%)+ zrRpuJ-%otX3^{Tt_s2(VL^hgOrW-smn()yfD5GSKAj~DBTxoTV@Je=$-nBy&R9U7W_KEy zQ;!9v&6T8LbfedYr zX1k+>h>oNbi1Z>s~>-Gaqom_%GybDPn9n@ zr*x%s>v>-N@s0WG55>L8`)wc5GkNF8rv<$1wIin+LhSnHnEd8NMkLPMurp6-s0`s= z^QL6g_uYu=OWO84?c0XlGI8Xxb$Dcl)p9JuerX zJ8!)$8YNqw5su8PKh$lrvE<_Dpm7P`TBT*;_^(dwah3H>je^>%EmyLt{=jfEb&}>F zY@M^D$*kk|``GlgKRthU4|+tlWrrTz=NvgzdGV80*V9T*wY?RY)AYQGZMhh`6h|=;YZw{56kj^Z#1x5EsgxZL4>}Gl%A# zsJEH;N4e+O1duK%>_heD>i^om$MjihX!Tv@nS?_2(*p;px-LZz@=Z3}7Z;uS-@qYv zF`ApxNzvp1$~A?3oI8jHI`RXVGBKN*y}ZBispiEW8sFRtsx~J6tk?L0M^_rn-}GXM zI%DtMUpjl&OtCvkj95g6z4Ow(W|r>%*K6(kfM+WY?06@hwmjn1y_eFkvBgI={wni0 zSn0LS^4QVf;#tuW!_=8mVMPej{PW?Pcc**Iw>*%c*OBS|{>sA9rWgBz4Rm}P8%Mhg zzArkFHPhBGYT;y6-8-63lS)p^@p7B6&TCT4En^tYBUt;r6C39kG)=L5IzC|;(=BI}|kkt0j&;h*$M5nn?jj93kXm?1aahfD~}RDRchvvfeyCV@;* z%0`l$V zH0)`IVd#rMCRbl7CKs!8OfS~wkxb-1%oGC(fp~42!{PJO6hu6ciYll>9Ub~@AZZ%r z%vRRn#ek#V-b4I@eP>(IO3#g4r6kSDFojtpD?TCto+l=D&9nN>L>~fojic#qpzV(I z!LWj8r|HVS%gVB7mUi&PE4~!u4mvamN4)7}G&=*>53DE&g9)LCPVnA%F-0MK-z*qW zrmo5l#gM`%;PNCRbDRlkNs>r6cm5@qbwI!P9cood@H(wE=1IisR+j?1F+QF+OXL;f zcGZ!AawqznCnT*QSlSGh-@NwIV;K%4+Ehs5=8bsdb7$|hl481b7Mv0@-)Hn026BHa z39c8+<4`0q!3J|kyRx20wRjW7Ls;?F?X(hwovN|z>={}%P#+MXk;j#Lh=i{`q6G%d z9gWoCcR3_7A2g4_k{0mX>J73wYAD0GJnDX&8OhE-6oqSv@cWxdMBN?E1i@RMtrxHFr+um1u2B8 zLo9`ehC7ZGzH8x!u^JKrgr)*CPd*qT+4u>1S-cvB2`b7y)6c$0Mq0fIeKknS&+yt_ zp8UH|kuAr*ARs~YIrxDhu#bW-zkkqTF|Q05DT)jPJ2_5{fMBBgfUn$H_?sir-XY0= z>-owkjo~;00iizNM*Wc!jq|oaG7E7@y5D7FdiCIN^!4(TRh}Z|1M`<8fE1BaeK`l{1B`Kz#1I|@m8-oKWv>SsQ zzCCL)YM@|GsDI=r=jn%3=7(0aPa{!v(I#FgO_^fQ&nNd9yDD+i6%j59NQH`nLF)Y! 
z`7FDyjsf8XNA>u#C4LH=V;j|r;SwE&9oKpC+nL}j3dK{YG&-TB1g(|j*3?vJcZV8E z$9_UbQ<6-yYXEVNMEJ=aKle#)iZJXhxGP!0zR)*OjDwkum{(on$s*JA)^lGGht> zyFh#zF}=SR1&E#I%On_jGxtKTyEbDlz!$@sZ6O3AA@9_j-Jg|3O=Xe{5%>bfn1mT= zL|W=WJri&Sl&KOq8r~PJ6Oz9wB*_sqRbb0849hEydjj%DZli@4DHiMzv)>S*CSfE^ z>?W?=Y=8&$;N0HXXTPz}zQc)0ftf%hPU)wiz*qrN18g2>`&wlmjnN3-DmFT0MRFhu z-ec)mwCpJ2c+ysqMKDsjNJ24$6@IO(D)LqN^nCX*8O1IX0R?Sm>%S_|`@ooovtdfq zRqE2Kv@G*#G}voLpyUp{%wKRdN#pARt6xNfTCYx1b@-z9SW1?#AgZ_B7g%9e!?sFo zaRqsE?$AHo$m$&xE?vEy94fq~Mof-rGGVwK&b}P=nt4K}DNq?E4P#U=!+@3!5kLh~ zVDW}a%aJ+~?t(bLpX1MD?v~+AZMOc_Z2-Jh0H=RW=90&ePX;yYedsq9rRUK-6)9l`9Wq3L@MQ`=W*KfEz3TKv#zq#B0 z>-m$%312%~p7qypAaSnGO4`S}H!Ghua56P|?|d{Zev_+A_qG0@)Bh!5RORUEmcnOC zL|;2f&g}Yu&s)CdOLIopjHjpwS;gUthp#;m<~7A7GE&n%ADXPS;r%9EaaFu@nBlbGSNy!0qqCQNaoJhpy{I{` zFPAI6G7y3%P5n?7&yuyQ$T>i%>uh8C$rMX0`6&=?tEr{oCAblj6|kes;q}o<(ZSP2 zGn(V0rnmnbnDVP6?0xjv!Hp(wF5A3uWnz7s4b8v35mv>NmApN-eC|89{&f=@H7ER< zd;M=E{x1?r`z=2*XB52NL2%rMKZVVSev@zSc9lePZC~>1#k%p^Lp8>oFzfI&qbZ$_ z$Tjt9tUFfuH}kF4%g7C~YgzTD=>y9NJSqR*ZBeskJpbOSn!eS>I|pK3yxrp7d)`-t zqgUv{Ln0nzI29=ExzJN!!GEn4pm$hp{7i{NqEWUS{pQ+VFW*v*&HE4ZpXhrZ(Rq zyta{_wjg2Nk$IVojkaD;m&hU+JBLHO$~%AK5l*ibIn*{`Q?0D15hSbOE668=BuCRW$wj<>!El6CfmH-4BBIG^m}IpJxguvAWAqnhPHsc4 z>olIv9(&}X<}}c{>|EiUce)M{WJ#j(iX9VnV&hNmDtW2B*tuKp2yJ~%RQ1w$=@TobaX*%gg`GDE-OJ*lcHu22vUrYQ7U!KWUyL%Z_UF0oS)L=J@4VURC!cQZEbCQP002gwIN63!P>>$@@~et ztYWb7fOMVelxLY0%dbDmM0)f;EcjpAstnQ^l{orY)he$I6O7fB)nmY#iCgRnn+OkIH%FwzIRm z{PpN5EZ~f%)Xg3p8DKkwzS(ORW;{6Q*uM0*q*IX!vR$4FzImTa(3xbRu`hr&kJ9n; z!qPOqkI{R+9^9d*=xp)r#K|ESi_Hyt{&64X{f$I|0NQ_(rCFq#WaOsY8GRl}96X)y z-_C!M;}1#xeFXoZkl#PZ^zXPof&bU|e`N9hkxx~4?Yt_vT$jJ{&aEKpJ3+^Sj@4Sg zIHEL8TJ<&ie<`e4q~N|9q;DpDrg`u$(i}3$YQE{6py%}PjAeUv-m?Abv_OzldbnlV zkppw3G*??U$IqTlL66j{yjAz4|27!R8J0SlkI2|t-?l{;bqiZ+v;4oi+qX`SNvb+V z{y4=-8upEL`HZam+lgJjzf6Nb69UANlHI;^mYJ-coKrN1=TJLq+#x6J zC98ES0zGMSO+207DR~lGIKk_3Ald+Pqw5Q##8uIU@^QxfSJnee8GH2PT|DN=Ho3gHfWU=*5Au?4|EjK~FC zv16|`uE?Vn)67f+Ww;onGL0(rOQa~1>1`z{m&`!Rsim4CPrL>WBgH630XKt4DCN~u9VI4`+7$c!#D2nM z)?i8SCel#+avq~3%*av9?&ac~?E90PSTfmA5$ zCOb{w&g6X}?xNw;SUQhPG(a0R!XBoq2V>O+3s;`D(3)JU78@fdKny6QYD7?w*7OEc zLY8CLsXPj6x;RF}u1Z93yEki_JuZ z_z}bwYo_#~cm_N}e^TFL3ke;3quWO=`j{SJ$+?ExliFBj6M3g#O(E^$8JZP$d-+qe zaF%*)ax(N%aJhAiSkVYGaSe2!1q_;rT^Wv|Gv&Y?hI(v-G)t&R4pa%9%G*JNA&Zb$ zMP347hh~sJWw9g_fp_?C%Re>ZhQ{!!kxGhKT@nH_eue1B^(cBPB{#qKCRL4wmD#uo zz&s-fwc_9x^{VY?I%LujCQhm2-u=djqSTVM3l$|Z;YLvuku-8SOVt-ON=x~dWc`o| zZFBEB96lCZ2yg@+mXp+>I5-Vwek4nadvGgZ5h{{66+;f%29g8J6>8NycpH zT-UccapzWkdKHnx45fSCD4)II<-TzdU~3#CF;KYWL`5{F}pbM)X8)2G)%VsI(;DVd-aG~GvCNj?bo_e-bvfE z?DQK4RMSt5e9YGfkDR=^Y9w~z*0Imqj_yzoQ2l;C*0X5Q0ud$itF2%BlThu>OD-4d u96MHRSrprTOTYD2-1;wa+rH^learRkX$LuO3+87XQ_A|cQjNN2(EbOupMr7# literal 0 HcmV?d00001 diff --git a/test/models/glTF2/draco/2CylinderEngine.gltf b/test/models/glTF2/draco/2CylinderEngine.gltf new file mode 100644 index 000000000..6cd29325d --- /dev/null +++ b/test/models/glTF2/draco/2CylinderEngine.gltf @@ -0,0 +1,4758 @@ +{ + "asset": { + "generator": "COLLADA2GLTF", + "version": "2.0" + }, + "scene": 0, + "scenes": [ + { + "nodes": [ + 81, + 0 + ] + } + ], + "nodes": [ + { + "children": [ + 80, + 79, + 78, + 77, + 76, + 75, + 74, + 73, + 13, + 10, + 7, + 4, + 1 + ] + }, + { + "children": [ + 3, + 2 + ], + "matrix": [ + 1, + 0, + 0, + 0, + 0, + 1, + 0, + 0, + 0, + 0, + 1, + 0, + 136.860107421875, + -64.45372009277344, + -36.179630279541016, + 1 + ] + }, + { + "mesh": 0, + "matrix": [ + -1, + 8.979318677493353e-11, + 0, + 0, + -8.979318677493353e-11, + -1, + 0, + 0, + 0, + 0, + 1, + 0, + -294.67718505859375, + 
73.97987365722656, + 16.17963218688965, + 1 + ] + }, + { + "mesh": 1, + "matrix": [ + 0.9995650053024292, + 0.029493184760212895, + 0, + 0, + -0.029493184760212895, + 0.9995650053024292, + 0, + 0, + 0, + 0, + 1, + 0, + -45.315460205078125, + -24.617263793945316, + -26.320369720458984, + 1 + ] + }, + { + "children": [ + 6, + 5 + ], + "matrix": [ + -1, + 8.979318677493353e-11, + 0, + 0, + -8.979318677493353e-11, + -1, + 0, + 0, + 0, + 0, + 1, + 0, + -136.860107421875, + 64.45372009277344, + 3.8203670978546143, + 1 + ] + }, + { + "mesh": 0, + "matrix": [ + -1, + 8.979318677493353e-11, + 0, + 0, + -8.979318677493353e-11, + -1, + 0, + 0, + 0, + 0, + 1, + 0, + -294.67718505859375, + 73.97987365722656, + 16.17963218688965, + 1 + ] + }, + { + "mesh": 1, + "matrix": [ + 0.9995650053024292, + 0.029493184760212895, + 0, + 0, + -0.029493184760212895, + 0.9995650053024292, + 0, + 0, + 0, + 0, + 1, + 0, + -45.315460205078125, + -24.617263793945316, + -26.32036781311035, + 1 + ] + }, + { + "children": [ + 9, + 8 + ], + "matrix": [ + -0.5, + -0.8660253882408142, + 0, + 0, + 0.8660253882408142, + -0.5, + 0, + 0, + 0, + 0, + 1, + 0, + 18.09818458557129, + -69.69783782958984, + -105.559814453125, + 1 + ] + }, + { + "mesh": 2, + "matrix": [ + -0.9390941858291626, + 0.3436597883701325, + 0, + 0, + -0.3436597883701325, + -0.9390941858291626, + 0, + 0, + 0, + 0, + 1, + 0, + -51.311012268066406, + -50.52240753173828, + -18.440185546875, + 1 + ] + }, + { + "mesh": 3, + "matrix": [ + -0.9390941858291626, + 0.3436597883701325, + 0, + 0, + -0.3436597883701325, + -0.9390941858291626, + 0, + 0, + 0, + 0, + 1, + 0, + -51.311012268066406, + -50.52240753173828, + 107.559814453125, + 1 + ] + }, + { + "children": [ + 12, + 11 + ], + "matrix": [ + 0.7071067690849304, + -0.7071067690849304, + 0, + 0, + 0.7071067690849304, + 0.7071067690849304, + 0, + 0, + 0, + 0, + 1, + 0, + 47.17867660522461, + -52.821327209472656, + -88.94477081298828, + 1 + ] + }, + { + "mesh": 4, + "matrix": [ + -0.4234085381031037, + -0.9059388637542723, + -7.575183536001616e-11, + 0, + -0.9059388637542723, + 0.4234085381031037, + -4.821281221478735e-11, + 0, + 7.575183536001616e-11, + 4.821281221478735e-11, + -1, + 0, + -90.59386444091795, + -24.379817962646488, + -40.05522918701172, + 1 + ] + }, + { + "mesh": 5, + "matrix": [ + -1.877404400829619e-7, + -1.194886607436274e-7, + 1, + 0, + -0.905938446521759, + 0.42340943217277527, + -1.194886607436274e-7, + 0, + -0.42340943217277527, + -0.905938446521759, + -1.877404400829619e-7, + 0, + -30.2958984375, + -52.56131362915039, + 25.05522727966309, + 1 + ] + }, + { + "children": [ + 72, + 71, + 70, + 69, + 68, + 67, + 66, + 65, + 14 + ] + }, + { + "children": [ + 64, + 63, + 62, + 61, + 60, + 59, + 58, + 57, + 56, + 43, + 29, + 15 + ], + "matrix": [ + -2.430540746445331e-7, + 0.0000014087579529586949, + -1, + 0, + -1, + -2.430540746445331e-7, + 2.4305373358402004e-7, + 0, + -2.4305373358402004e-7, + 1, + 0.0000014087580666455324, + 0, + -48.26182556152344, + -59.11042404174805, + 34.595985412597656, + 1 + ] + }, + { + "children": [ + 28, + 27, + 16 + ], + "matrix": [ + -0.4546820223331452, + 0.6541662216186523, + -0.6044260263442993, + 0, + 0.8696397542953491, + 0.4726206660270691, + -0.1426759660243988, + 0, + 0.19233042001724243, + -0.590505063533783, + -0.7837810516357422, + 0, + 14.898193359375, + 85.82951354980469, + -48.034645080566406, + 1 + ] + }, + { + "children": [ + 26, + 25, + 24, + 23, + 22, + 21, + 20, + 19, + 18, + 17 + ] + }, + { + "mesh": 6, + "matrix": [ + 0.8450329899787903, + 
0.005398945417255163, + -0.5346869230270386, + 0, + -0.4710906744003296, + 0.4805830717086792, + -0.7396712303161621, + 0, + 0.25296804308891296, + 0.8769326210021973, + 0.4086519181728363, + 0, + -74.0894775390625, + 71.41646575927734, + -157.91323852539065, + 1 + ] + }, + { + "mesh": 6, + "matrix": [ + -0.7145451903343201, + -0.1154177337884903, + 0.6900028586387634, + 0, + -0.6522517800331116, + 0.4665486216545105, + -0.5974110960960388, + 0, + -0.25296807289123535, + -0.8769327998161316, + -0.4086515009403229, + 0, + 122.53109741210938, + 86.64814758300781, + -312.3133850097656, + 1 + ] + }, + { + "mesh": 6, + "matrix": [ + 0.8450329899787903, + 0.005398945417255163, + -0.5346869230270386, + 0, + -0.4710906744003296, + 0.4805830717086792, + -0.7396712303161621, + 0, + 0.25296804308891296, + 0.8769326210021973, + 0.4086519181728363, + 0, + -69.3792953491211, + 71.78133392333984, + -161.61203002929688, + 1 + ] + }, + { + "mesh": 6, + "matrix": [ + -0.7145451903343201, + -0.1154177337884903, + 0.6900028586387634, + 0, + -0.6522517800331116, + 0.4665486216545105, + -0.5974110960960388, + 0, + -0.25296807289123535, + -0.8769327998161316, + -0.4086515009403229, + 0, + 127.24127197265624, + 87.01302337646484, + -316.0121765136719, + 1 + ] + }, + { + "mesh": 6, + "matrix": [ + 0.8450329899787903, + 0.005398945417255163, + -0.5346869230270386, + 0, + -0.4710906744003296, + 0.4805830717086792, + -0.7396712303161621, + 0, + 0.25296804308891296, + 0.8769326210021973, + 0.4086519181728363, + 0, + -64.66907501220703, + 72.14624786376953, + -165.310791015625, + 1 + ] + }, + { + "mesh": 6, + "matrix": [ + -0.7145451903343201, + -0.1154177337884903, + 0.6900028586387634, + 0, + -0.6522517800331116, + 0.4665486216545105, + -0.5974110960960388, + 0, + -0.25296807289123535, + -0.8769327998161316, + -0.4086515009403229, + 0, + 131.9515380859375, + 87.37792205810547, + -319.7109680175781, + 1 + ] + }, + { + "mesh": 6, + "matrix": [ + 0.8450329899787903, + 0.005398945417255163, + -0.5346869230270386, + 0, + -0.4710906744003296, + 0.4805830717086792, + -0.7396712303161621, + 0, + 0.25296804308891296, + 0.8769326210021973, + 0.4086519181728363, + 0, + -59.958885192871094, + 72.5111312866211, + -169.00955200195312, + 1 + ] + }, + { + "mesh": 6, + "matrix": [ + -0.7145451903343201, + -0.1154177337884903, + 0.6900028586387634, + 0, + -0.6522517800331116, + 0.4665486216545105, + -0.5974110960960388, + 0, + -0.25296807289123535, + -0.8769327998161316, + -0.4086515009403229, + 0, + 136.66165161132812, + 87.74280548095703, + -323.4097595214844, + 1 + ] + }, + { + "mesh": 6, + "matrix": [ + 0.8450329899787903, + 0.005398945417255163, + -0.5346869230270386, + 0, + -0.4710906744003296, + 0.4805830717086792, + -0.7396712303161621, + 0, + 0.25296804308891296, + 0.8769326210021973, + 0.4086519181728363, + 0, + -55.24869537353516, + 72.87601470947266, + -172.70831298828125, + 1 + ] + }, + { + "mesh": 6, + "matrix": [ + -0.7145451903343201, + -0.1154177337884903, + 0.6900028586387634, + 0, + -0.6522517800331116, + 0.4665486216545105, + -0.5974110960960388, + 0, + -0.25296807289123535, + -0.8769327998161316, + -0.4086515009403229, + 0, + 141.37188720703125, + 88.10767364501953, + -327.1084899902344, + 1 + ] + }, + { + "mesh": 7, + "matrix": [ + 0.7850325703620911, + 0.06081420928239823, + -0.6164620518684387, + 0, + -0.13561886548995972, + 0.9878994822502136, + -0.07524696737527847, + 0, + 0.6044265031814575, + 0.14267520606517792, + 0.7837808728218079, + 0, + -5.746735095977783, + -250.409912109375, + -86.68790435791016, + 1 
+ ] + }, + { + "mesh": 8, + "matrix": [ + 0.785033106803894, + 0.06081344559788704, + -0.616461455821991, + 0, + -0.3681585192680359, + -0.7545340061187744, + -0.5432658195495605, + 0, + -0.4981790184974671, + 0.6534371972084045, + -0.569945216178894, + 0, + 34.187137603759766, + 252.666015625, + 65.06369018554688, + 1 + ] + }, + { + "children": [ + 42, + 41, + 30 + ], + "matrix": [ + 0.4546822011470794, + 0.6541662216186523, + 0.604425847530365, + 0, + -0.8696396350860596, + 0.4726209044456482, + 0.14267593622207642, + 0, + -0.1923305094242096, + -0.5905048847198486, + 0.7837811708450317, + 0, + 91.87051391601564, + 80.63255310058594, + 166.26089477539065, + 1 + ] + }, + { + "children": [ + 40, + 39, + 38, + 37, + 36, + 35, + 34, + 33, + 32, + 31 + ] + }, + { + "mesh": 6, + "matrix": [ + 0.8620702028274536, + -0.015319700352847576, + -0.506557285785675, + 0, + -0.4340354502201081, + 0.4936883449554444, + -0.7535814642906189, + 0, + 0.26162606477737427, + 0.8695039749145508, + 0.41894471645355225, + 0, + -77.39921569824219, + 74.49835205078125, + -159.90199279785156, + 1 + ] + }, + { + "mesh": 6, + "matrix": [ + -0.6893031001091003, + -0.13550062477588654, + 0.7116886973381042, + 0, + -0.6755834221839905, + 0.47497618198394775, + -0.5639013051986694, + 0, + -0.2616262137889862, + -0.8695039749145508, + -0.4189445376396179, + 0, + 115.9541015625, + 89.47693634033203, + -311.7364501953125, + 1 + ] + }, + { + "mesh": 6, + "matrix": [ + 0.8620702028274536, + -0.015319700352847576, + -0.506557285785675, + 0, + -0.4340354502201081, + 0.4936883449554444, + -0.7535814642906189, + 0, + 0.26162606477737427, + 0.8695039749145508, + 0.41894471645355225, + 0, + -71.11894989013672, + 74.98487091064453, + -164.83367919921875, + 1 + ] + }, + { + "mesh": 6, + "matrix": [ + -0.6893031001091003, + -0.13550062477588654, + 0.7116886973381042, + 0, + -0.6755834221839905, + 0.47497618198394775, + -0.5639013051986694, + 0, + -0.2616262137889862, + -0.8695039749145508, + -0.4189445376396179, + 0, + 122.234375, + 89.96346282958984, + -316.668212890625, + 1 + ] + }, + { + "mesh": 6, + "matrix": [ + 0.8620702028274536, + -0.015319700352847576, + -0.506557285785675, + 0, + -0.4340354502201081, + 0.4936883449554444, + -0.7535814642906189, + 0, + 0.26162606477737427, + 0.8695039749145508, + 0.41894471645355225, + 0, + -64.83870697021484, + 75.47139739990234, + -169.76536560058597, + 1 + ] + }, + { + "mesh": 6, + "matrix": [ + -0.6893031001091003, + -0.13550062477588654, + 0.7116886973381042, + 0, + -0.6755834221839905, + 0.47497618198394775, + -0.5639013051986694, + 0, + -0.2616262137889862, + -0.8695039749145508, + -0.4189445376396179, + 0, + 128.51461791992188, + 90.44998931884766, + -321.5999145507813, + 1 + ] + }, + { + "mesh": 6, + "matrix": [ + 0.8620702028274536, + -0.015319700352847576, + -0.506557285785675, + 0, + -0.4340354502201081, + 0.4936883449554444, + -0.7535814642906189, + 0, + 0.26162606477737427, + 0.8695039749145508, + 0.41894471645355225, + 0, + -58.558441162109375, + 75.9579086303711, + -174.6970672607422, + 1 + ] + }, + { + "mesh": 6, + "matrix": [ + -0.6893031001091003, + -0.13550062477588654, + 0.7116886973381042, + 0, + -0.6755834221839905, + 0.47497618198394775, + -0.5639013051986694, + 0, + -0.2616262137889862, + -0.8695039749145508, + -0.4189445376396179, + 0, + 134.79489135742188, + 90.9365005493164, + -326.5315856933594, + 1 + ] + }, + { + "mesh": 6, + "matrix": [ + 0.8620702028274536, + -0.015319700352847576, + -0.506557285785675, + 0, + -0.4340354502201081, + 0.4936883449554444, + 
-0.7535814642906189, + 0, + 0.26162606477737427, + 0.8695039749145508, + 0.41894471645355225, + 0, + -52.27817535400391, + 76.44441223144531, + -179.62875366210938, + 1 + ] + }, + { + "mesh": 6, + "matrix": [ + -0.6893031001091003, + -0.13550062477588654, + 0.7116886973381042, + 0, + -0.6755834221839905, + 0.47497618198394775, + -0.5639013051986694, + 0, + -0.2616262137889862, + -0.8695039749145508, + -0.4189445376396179, + 0, + 141.07516479492188, + 91.42301940917967, + -331.4632263183594, + 1 + ] + }, + { + "mesh": 7, + "matrix": [ + 0.7850325703620911, + 0.06081420928239823, + -0.6164620518684387, + 0, + -0.13561886548995972, + 0.9878994822502136, + -0.07524696737527847, + 0, + 0.6044265031814575, + 0.14267520606517792, + 0.7837808728218079, + 0, + -1.248520016670227, + -250.06192016601565, + -90.22713470458984, + 1 + ] + }, + { + "mesh": 8, + "matrix": [ + 0.7850322723388672, + 0.06081392988562584, + -0.6164625287055969, + 0, + -0.3924780488014221, + -0.7211048007011414, + -0.5709367990493774, + 0, + -0.4792549908161164, + 0.6901518106460571, + -0.5422223806381226, + 0, + 45.59693908691406, + 243.51312255859375, + 69.38878631591797, + 1 + ] + }, + { + "children": [ + 55, + 54, + 49, + 44 + ], + "matrix": [ + 0.9999974370002748, + 0, + -0.0022710729390382767, + 0, + 0, + 1, + 0, + 0, + 0.0022710729390382767, + 0, + 0.9999974370002748, + 0, + -0.13409000635147095, + -12.999988555908203, + 0.12365700304508208, + 1 + ] + }, + { + "children": [ + 48, + 47, + 46, + 45 + ], + "matrix": [ + 1, + 0, + 0, + 0, + 0, + 1, + 0, + 0, + 0, + 0, + 1, + 0, + -0.000007999999979801942, + -0.000011000000085914508, + -99.9995346069336, + 1 + ] + }, + { + "mesh": 9, + "matrix": [ + 0.9999974370002748, + 0, + 0.002270043129101396, + 0, + 0, + 1, + 0, + 0, + -0.002270043129101396, + 0, + 0.9999974370002748, + 0, + 49.7321662902832, + 344.3533630371094, + 79.59915161132812, + 1 + ] + }, + { + "mesh": 10, + "matrix": [ + 0.9999974370002748, + 1.0191530236923485e-13, + 0.0022699993569403887, + 0, + -1.0191530236923485e-13, + -1, + 8.97931243248884e-11, + 0, + 0.0022699993569403887, + -8.97931243248884e-11, + -0.9999974370002748, + 0, + 49.69811248779297, + 257.12298583984375, + 94.59911346435548, + 1 + ] + }, + { + "mesh": 11, + "matrix": [ + 0.9986716508865356, + -4.959933574966158e-10, + -0.051525991410017014, + 0, + -0.051525991410017014, + -1.9239376314317272e-8, + -0.9986716508865356, + 0, + -4.959933574966158e-10, + 1, + -1.9239376314317272e-8, + 0, + 67.53964233398438, + 326.7381286621094, + 297.74066162109375, + 1 + ] + }, + { + "mesh": 11, + "matrix": [ + 0.9986629486083984, + -1.884843170429917e-9, + -0.05169397220015526, + 0, + -0.05169397220015526, + -7.287438563707839e-8, + -0.9986629486083984, + 0, + -1.884843170429917e-9, + 1, + -7.287438563707839e-8, + 0, + 17.574960708618164, + 326.7381286621094, + 297.6291809082031, + 1 + ] + }, + { + "children": [ + 53, + 52, + 51, + 50 + ] + }, + { + "mesh": 9, + "matrix": [ + 0.9999974370002748, + 0, + 0.002270043129101396, + 0, + 0, + 1, + 0, + 0, + -0.002270043129101396, + 0, + 0.9999974370002748, + 0, + 49.50515365600586, + 344.3533630371094, + 79.59934997558594, + 1 + ] + }, + { + "mesh": 10, + "matrix": [ + 0.9999974370002748, + 1.0191530236923485e-13, + 0.0022699993569403887, + 0, + -1.0191530236923485e-13, + -1, + 8.97931243248884e-11, + 0, + 0.0022699993569403887, + -8.97931243248884e-11, + -0.9999974370002748, + 0, + 49.47110366821289, + 257.1229553222656, + 94.59931182861328, + 1 + ] + }, + { + "mesh": 11, + "matrix": [ + 0.9984543323516846, + 
-6.428584886641885e-10, + -0.05557801201939583, + 0, + -0.05557801201939583, + -2.311567648405344e-8, + -0.9984543323516846, + 0, + -6.428584886641885e-10, + 1, + -2.311567648405344e-8, + 0, + 68.16710662841797, + 326.7381286621094, + 297.7886657714844, + 1 + ] + }, + { + "mesh": 11, + "matrix": [ + 0.998445212841034, + 5.507552747197052e-10, + -0.05574197694659233, + 0, + -0.05574197694659233, + 1.97455189976381e-8, + -0.998445212841034, + 0, + 5.507552747197052e-10, + 1, + 1.97455189976381e-8, + 0, + 18.201759338378906, + 326.7381286621094, + 297.6770324707031, + 1 + ] + }, + { + "mesh": 12, + "matrix": [ + -0.9999873638153076, + -0.004487415309995413, + -0.0022699744440615177, + 0, + 0.004487401805818081, + -0.999989926815033, + 0.000011093182365584651, + 0, + -0.0022700014524161816, + 9.06754564766743e-7, + 0.9999974370002748, + 0, + -29.98022842407227, + 768.69921875, + -20.581327438354492, + 1 + ] + }, + { + "mesh": 12, + "matrix": [ + -0.9999850392341614, + -0.004973169881850481, + -0.002269970485940576, + 0, + 0.004973156377673149, + -0.9999876618385316, + 0.00001164450713986298, + 0, + -0.0022700002882629637, + 3.554153522600245e-7, + 0.9999974370002748, + 0, + 48.794708251953125, + 768.6771850585938, + -20.40250396728516, + 1 + ] + }, + { + "mesh": 13, + "matrix": [ + -0.8213930726051331, + 0.3830228745937348, + 0.4226191341876984, + 0, + -0.3830228745937348, + 0.17860689759254456, + -0.9063073992729188, + 0, + -0.4226191341876984, + -0.9063073992729188, + 4.807415265872805e-8, + 0, + -141.64085388183597, + 388.6727600097656, + 122.00779724121094, + 1 + ] + }, + { + "mesh": 13, + "matrix": [ + -0.8213450312614441, + -0.38299909234046936, + 0.42273396253585815, + 0, + -0.38312679529190075, + -0.1786557286977768, + -0.9062538146972656, + 0, + 0.4226182401180268, + -0.906307816505432, + 7.823305168130901e-7, + 0, + -270.7940673828125, + 145.4340057373047, + 122.04411315917967, + 1 + ] + }, + { + "mesh": 14, + "matrix": [ + 1, + -0.000060999998822808266, + -0.000001000000338535756, + 0, + 0.000001000000338535756, + 9.44896494381453e-10, + 1, + 0, + -0.000060999998822808266, + -1, + 1.0058964772241552e-9, + 0, + 51.087928771972656, + 325.7384033203125, + 91.09835815429688, + 1 + ] + }, + { + "mesh": 15, + "matrix": [ + 1, + 0, + 0, + 0, + 0, + 4.4896641959724086e-11, + -1, + 0, + 0, + 1, + 4.4896641959724086e-11, + 0, + 24.52630043029785, + 213.73817443847656, + 283.0735778808594, + 1 + ] + }, + { + "mesh": 16, + "matrix": [ + 0.9026297926902772, + -0.4304177761077881, + 0, + 0, + 0.4304177761077881, + 0.9026297926902772, + 0, + 0, + 0, + 0, + 1, + 0, + 353.9793395996094, + 330.9040222167969, + -5.389931201934815, + 1 + ] + }, + { + "mesh": 17, + "matrix": [ + 0.9026297330856324, + 0.4304178953170777, + 0, + 0, + -0.4304178953170777, + 0.9026297330856324, + 0, + 0, + 0, + 0, + 1, + 0, + 277.7611083984375, + 580.0806274414062, + -5.3898539543151855, + 1 + ] + }, + { + "mesh": 18, + "matrix": [ + 1, + 0, + 0, + 0, + 0, + 4.4896641959724086e-11, + 1, + 0, + 0, + -1, + 4.4896641959724086e-11, + 0, + 49.58733749389649, + 175.73818969726565, + 89.59835815429688, + 1 + ] + }, + { + "mesh": 19, + "matrix": [ + 0.9999933242797852, + -1.649976856532565e-11, + 0.003653998486697674, + 0, + 0.003653998486697674, + 9.031045244967117e-9, + -0.9999933242797852, + 0, + -1.649976856532565e-11, + 1, + 9.031045244967117e-9, + 0, + 81.19497680664062, + 247.73817443847656, + 220.1460723876953, + 1 + ] + }, + { + "mesh": 19, + "matrix": [ + 0.9999898672103882, + -4.353116391658807e-12, + 
-0.004497999791055918, + 0, + -0.004497999791055918, + -1.9355692604250407e-9, + -0.9999898672103882, + 0, + -4.353116391658807e-12, + 1, + -1.9355692604250407e-9, + 0, + -23.12494468688965, + 247.73817443847656, + 220.35459899902344, + 1 + ] + }, + { + "mesh": 20, + "matrix": [ + -2.907086411596538e-7, + 0.7071071863174438, + -0.7071064114570618, + 0, + 7.018321639407077e-7, + -0.7071064114570618, + -0.7071071863174438, + 0, + -1, + -7.018321639407077e-7, + -2.907086411596538e-7, + 0, + 65, + 128.71702575683597, + -3.270524024963379, + 1 + ] + }, + { + "mesh": 21, + "matrix": [ + 4.4896641959724086e-11, + 0, + -1, + 0, + 0, + 1, + 0, + 0, + 1, + 0, + 4.4896641959724086e-11, + 0, + 223.9999847412109, + 31.98792839050293, + 23.50873374938965, + 1 + ] + }, + { + "mesh": 22, + "matrix": [ + 4.4896641959724086e-11, + 0, + -1, + 0, + 0, + 1, + 0, + 0, + 1, + 0, + 4.4896641959724086e-11, + 0, + 223.9999847412109, + 30.487924575805664, + 25.008731842041016, + 1 + ] + }, + { + "mesh": 23, + "matrix": [ + 1.1084333451094608e-10, + -0.0009251347510144116, + -0.9999995827674866, + 0, + 2.396268712345773e-7, + -0.9999995827674866, + 0.0009251347510144116, + 0, + -1, + -2.396268712345773e-7, + 1.1084333451094608e-10, + 0, + -295, + 21.498876571655277, + 0.49255698919296265, + 1 + ] + }, + { + "mesh": 23, + "matrix": [ + 5.062397168131838e-10, + 0.002356505487114191, + -0.9999971985816956, + 0, + -4.2965237412317947e-7, + -0.9999971985816956, + -0.002356505487114191, + 0, + -1, + 4.2965237412317947e-7, + 5.062397168131838e-10, + 0, + -295, + 21.60527801513672, + -105.32867431640624, + 1 + ] + }, + { + "mesh": 24, + "matrix": [ + -1, + 6.349337028366264e-11, + 6.349337028366264e-11, + 0, + -6.349337028366264e-11, + -2.220446049250313e-16, + -1, + 0, + -6.349337028366264e-11, + -1, + -2.220446049250313e-16, + 0, + -126.93566131591795, + 0.000003000000106112566, + 36.558841705322266, + 1 + ] + }, + { + "mesh": 25, + "matrix": [ + 1, + 0, + 0, + 0, + 0, + 4.4896641959724086e-11, + 1, + 0, + 0, + -1, + 4.4896641959724086e-11, + 0, + 126.93566131591795, + -0.00005299999975250103, + -36.5588493347168, + 1 + ] + }, + { + "mesh": 26 + }, + { + "mesh": 27, + "matrix": [ + -0.9861037135124208, + 0.16613073647022247, + 0, + 0, + -0.16613073647022247, + -0.9861037135124208, + 0, + 0, + 0, + 0, + 1, + 0, + -71.01806640625, + -22.193012237548828, + -20, + 1 + ] + }, + { + "mesh": 27, + "matrix": [ + 0.9861037135124208, + -0.16613082587718964, + 0, + 0, + 0.16613082587718964, + 0.9861037135124208, + 0, + 0, + 0, + 0, + 1, + 0, + 71.01806640625, + 22.19301414489746, + 22, + 1 + ] + }, + { + "mesh": 28, + "matrix": [ + 4.4896641959724086e-11, + 0, + 1, + 0, + 0, + 1, + 0, + 0, + -1, + 0, + 4.4896641959724086e-11, + 0, + -100, + -66.15117645263672, + -5.627896785736084, + 1 + ] + }, + { + "mesh": 28, + "matrix": [ + 4.4896641959724086e-11, + 0, + 1, + 0, + 0, + 1, + 0, + 0, + -1, + 0, + 4.4896641959724086e-11, + 0, + -100, + -66.15116882324219, + 14.372098922729492, + 1 + ] + }, + { + "mesh": 28, + "matrix": [ + -2.220446049250313e-16, + 6.349337028366264e-11, + 1, + 0, + -6.349337028366264e-11, + -1, + 6.349337028366264e-11, + 0, + 1, + -6.349337028366264e-11, + -2.220446049250313e-16, + 0, + 100, + -133.8488311767578, + 34.37212753295899, + 1 + ] + }, + { + "mesh": 28, + "matrix": [ + -2.220446049250313e-16, + 6.349337028366264e-11, + 1, + 0, + -6.349337028366264e-11, + -1, + 6.349337028366264e-11, + 0, + 1, + -6.349337028366264e-11, + -2.220446049250313e-16, + 0, + 100, + -133.8488311767578, + 54.37212371826172, + 1 + 
] + }, + { + "mesh": 23, + "matrix": [ + 5.062397168131838e-10, + 0.002356505487114191, + -0.9999971985816956, + 0, + -4.2965237412317947e-7, + -0.9999971985816956, + -0.002356505487114191, + 0, + -1, + 4.2965237412317947e-7, + 5.062397168131838e-10, + 0, + -295, + 123.80779266357422, + -100.10150146484376, + 1 + ] + }, + { + "mesh": 23, + "matrix": [ + 5.062397168131838e-10, + 0.002356505487114191, + -0.9999971985816956, + 0, + -4.2965237412317947e-7, + -0.9999971985816956, + -0.002356505487114191, + 0, + -1, + 4.2965237412317947e-7, + 5.062397168131838e-10, + 0, + -295, + 132.1395263671875, + -3.9833459854125977, + 1 + ] + }, + { + "matrix": [ + 0.7071067690849304, + -2.1563657526257887e-7, + -0.7071068286895752, + 0, + -0.3312943577766419, + 0.883452296257019, + -0.33129459619522095, + 0, + 0.6246951818466187, + 0.4685211479663849, + 0.6246950030326843, + 0, + 1005.9874267578124, + 766.3170776367188, + 953.3455810546876, + 1 + ], + "camera": 0 + } + ], + "cameras": [ + { + "perspective": { + "aspectRatio": 1, + "yfov": 0.3143463730812073, + "zfar": 1000000, + "znear": 0.04999999701976776 + }, + "type": "perspective" + } + ], + "meshes": [ + { + "primitives": [ + { + "attributes": { + "NORMAL": 1, + "POSITION": 2 + }, + "indices": 0, + "mode": 4, + "material": 0, + "extensions": { + "KHR_draco_mesh_compression": { + "bufferView": 0, + "attributes": { + "NORMAL": 0, + "POSITION": 1 + } + } + } + }, + { + "attributes": { + "NORMAL": 4, + "POSITION": 5 + }, + "indices": 3, + "mode": 4, + "material": 1, + "extensions": { + "KHR_draco_mesh_compression": { + "bufferView": 1, + "attributes": { + "NORMAL": 0, + "POSITION": 1 + } + } + } + } + ], + "name": "Piston_123-844_0_Parts_1" + }, + { + "primitives": [ + { + "attributes": { + "NORMAL": 7, + "POSITION": 8 + }, + "indices": 6, + "mode": 4, + "material": 2, + "extensions": { + "KHR_draco_mesh_compression": { + "bufferView": 2, + "attributes": { + "NORMAL": 0, + "POSITION": 1 + } + } + } + } + ], + "name": "body_24" + }, + { + "primitives": [ + { + "attributes": { + "NORMAL": 10, + "POSITION": 11 + }, + "indices": 9, + "mode": 4, + "material": 3, + "extensions": { + "KHR_draco_mesh_compression": { + "bufferView": 3, + "attributes": { + "NORMAL": 0, + "POSITION": 1 + } + } + } + } + ], + "name": "body_23" + }, + { + "primitives": [ + { + "attributes": { + "NORMAL": 13, + "POSITION": 14 + }, + "indices": 12, + "mode": 4, + "material": 4, + "extensions": { + "KHR_draco_mesh_compression": { + "bufferView": 4, + "attributes": { + "NORMAL": 0, + "POSITION": 1 + } + } + } + } + ], + "name": "body_22" + }, + { + "primitives": [ + { + "attributes": { + "NORMAL": 16, + "POSITION": 17 + }, + "indices": 15, + "mode": 4, + "material": 5, + "extensions": { + "KHR_draco_mesh_compression": { + "bufferView": 5, + "attributes": { + "NORMAL": 0, + "POSITION": 1 + } + } + } + } + ], + "name": "body_21" + }, + { + "primitives": [ + { + "attributes": { + "NORMAL": 19, + "POSITION": 20 + }, + "indices": 18, + "mode": 4, + "material": 6, + "extensions": { + "KHR_draco_mesh_compression": { + "bufferView": 6, + "attributes": { + "NORMAL": 0, + "POSITION": 1 + } + } + } + } + ], + "name": "body_20" + }, + { + "primitives": [ + { + "attributes": { + "NORMAL": 22, + "POSITION": 23 + }, + "indices": 21, + "mode": 4, + "material": 7, + "extensions": { + "KHR_draco_mesh_compression": { + "bufferView": 7, + "attributes": { + "NORMAL": 0, + "POSITION": 1 + } + } + } + }, + { + "attributes": { + "NORMAL": 25, + "POSITION": 26 + }, + "indices": 24, + "mode": 4, + "material": 8, 
+ "extensions": { + "KHR_draco_mesh_compression": { + "bufferView": 8, + "attributes": { + "NORMAL": 0, + "POSITION": 1 + } + } + } + }, + { + "attributes": { + "NORMAL": 28, + "POSITION": 29 + }, + "indices": 27, + "mode": 4, + "material": 9, + "extensions": { + "KHR_draco_mesh_compression": { + "bufferView": 9, + "attributes": { + "NORMAL": 0, + "POSITION": 1 + } + } + } + } + ], + "name": "Spring_Link__0_Parts_1" + }, + { + "primitives": [ + { + "attributes": { + "NORMAL": 31, + "POSITION": 32 + }, + "indices": 30, + "mode": 4, + "material": 10, + "extensions": { + "KHR_draco_mesh_compression": { + "bufferView": 10, + "attributes": { + "NORMAL": 0, + "POSITION": 1 + } + } + } + } + ], + "name": "body_19" + }, + { + "primitives": [ + { + "attributes": { + "NORMAL": 34, + "POSITION": 35 + }, + "indices": 33, + "mode": 4, + "material": 11, + "extensions": { + "KHR_draco_mesh_compression": { + "bufferView": 11, + "attributes": { + "NORMAL": 0, + "POSITION": 1 + } + } + } + } + ], + "name": "body_18" + }, + { + "primitives": [ + { + "attributes": { + "NORMAL": 37, + "POSITION": 38 + }, + "indices": 36, + "mode": 4, + "material": 12, + "extensions": { + "KHR_draco_mesh_compression": { + "bufferView": 12, + "attributes": { + "NORMAL": 0, + "POSITION": 1 + } + } + } + } + ], + "name": "body_17" + }, + { + "primitives": [ + { + "attributes": { + "NORMAL": 40, + "POSITION": 41 + }, + "indices": 39, + "mode": 4, + "material": 13, + "extensions": { + "KHR_draco_mesh_compression": { + "bufferView": 13, + "attributes": { + "NORMAL": 0, + "POSITION": 1 + } + } + } + } + ], + "name": "body_16" + }, + { + "primitives": [ + { + "attributes": { + "NORMAL": 43, + "POSITION": 44 + }, + "indices": 42, + "mode": 4, + "material": 14, + "extensions": { + "KHR_draco_mesh_compression": { + "bufferView": 14, + "attributes": { + "NORMAL": 0, + "POSITION": 1 + } + } + } + } + ], + "name": "body_15" + }, + { + "primitives": [ + { + "attributes": { + "NORMAL": 46, + "POSITION": 47 + }, + "indices": 45, + "mode": 4, + "material": 15, + "extensions": { + "KHR_draco_mesh_compression": { + "bufferView": 15, + "attributes": { + "NORMAL": 0, + "POSITION": 1 + } + } + } + } + ], + "name": "body_14" + }, + { + "primitives": [ + { + "attributes": { + "NORMAL": 49, + "POSITION": 50 + }, + "indices": 48, + "mode": 4, + "material": 16, + "extensions": { + "KHR_draco_mesh_compression": { + "bufferView": 16, + "attributes": { + "NORMAL": 0, + "POSITION": 1 + } + } + } + } + ], + "name": "body_13" + }, + { + "primitives": [ + { + "attributes": { + "NORMAL": 52, + "POSITION": 53 + }, + "indices": 51, + "mode": 4, + "material": 17, + "extensions": { + "KHR_draco_mesh_compression": { + "bufferView": 17, + "attributes": { + "NORMAL": 0, + "POSITION": 1 + } + } + } + } + ], + "name": "body_12" + }, + { + "primitives": [ + { + "attributes": { + "NORMAL": 55, + "POSITION": 56 + }, + "indices": 54, + "mode": 4, + "material": 18, + "extensions": { + "KHR_draco_mesh_compression": { + "bufferView": 18, + "attributes": { + "NORMAL": 0, + "POSITION": 1 + } + } + } + } + ], + "name": "body_11" + }, + { + "primitives": [ + { + "attributes": { + "NORMAL": 58, + "POSITION": 59 + }, + "indices": 57, + "mode": 4, + "material": 19, + "extensions": { + "KHR_draco_mesh_compression": { + "bufferView": 19, + "attributes": { + "NORMAL": 0, + "POSITION": 1 + } + } + } + } + ], + "name": "body_10" + }, + { + "primitives": [ + { + "attributes": { + "NORMAL": 61, + "POSITION": 62 + }, + "indices": 60, + "mode": 4, + "material": 20, + "extensions": { + 
"KHR_draco_mesh_compression": { + "bufferView": 20, + "attributes": { + "NORMAL": 0, + "POSITION": 1 + } + } + } + } + ], + "name": "body_9" + }, + { + "primitives": [ + { + "attributes": { + "NORMAL": 64, + "POSITION": 65 + }, + "indices": 63, + "mode": 4, + "material": 21, + "extensions": { + "KHR_draco_mesh_compression": { + "bufferView": 21, + "attributes": { + "NORMAL": 0, + "POSITION": 1 + } + } + } + } + ], + "name": "body_8" + }, + { + "primitives": [ + { + "attributes": { + "NORMAL": 67, + "POSITION": 68 + }, + "indices": 66, + "mode": 4, + "material": 22, + "extensions": { + "KHR_draco_mesh_compression": { + "bufferView": 22, + "attributes": { + "NORMAL": 0, + "POSITION": 1 + } + } + } + } + ], + "name": "body_7" + }, + { + "primitives": [ + { + "attributes": { + "NORMAL": 70, + "POSITION": 71 + }, + "indices": 69, + "mode": 4, + "material": 23, + "extensions": { + "KHR_draco_mesh_compression": { + "bufferView": 23, + "attributes": { + "NORMAL": 0, + "POSITION": 1 + } + } + } + } + ], + "name": "body_6" + }, + { + "primitives": [ + { + "attributes": { + "NORMAL": 73, + "POSITION": 74 + }, + "indices": 72, + "mode": 4, + "material": 24, + "extensions": { + "KHR_draco_mesh_compression": { + "bufferView": 24, + "attributes": { + "NORMAL": 0, + "POSITION": 1 + } + } + } + } + ], + "name": "body_5" + }, + { + "primitives": [ + { + "attributes": { + "NORMAL": 76, + "POSITION": 77 + }, + "indices": 75, + "mode": 4, + "material": 25, + "extensions": { + "KHR_draco_mesh_compression": { + "bufferView": 25, + "attributes": { + "NORMAL": 0, + "POSITION": 1 + } + } + } + } + ], + "name": "body_4" + }, + { + "primitives": [ + { + "attributes": { + "NORMAL": 79, + "POSITION": 80 + }, + "indices": 78, + "mode": 4, + "material": 26, + "extensions": { + "KHR_draco_mesh_compression": { + "bufferView": 26, + "attributes": { + "NORMAL": 0, + "POSITION": 1 + } + } + } + } + ], + "name": "body" + }, + { + "primitives": [ + { + "attributes": { + "NORMAL": 82, + "POSITION": 83 + }, + "indices": 81, + "mode": 4, + "material": 27, + "extensions": { + "KHR_draco_mesh_compression": { + "bufferView": 27, + "attributes": { + "NORMAL": 0, + "POSITION": 1 + } + } + } + } + ], + "name": "body_3" + }, + { + "primitives": [ + { + "attributes": { + "NORMAL": 85, + "POSITION": 86 + }, + "indices": 84, + "mode": 4, + "material": 28, + "extensions": { + "KHR_draco_mesh_compression": { + "bufferView": 28, + "attributes": { + "NORMAL": 0, + "POSITION": 1 + } + } + } + } + ], + "name": "body_2" + }, + { + "primitives": [ + { + "attributes": { + "NORMAL": 88, + "POSITION": 89 + }, + "indices": 87, + "mode": 4, + "material": 29, + "extensions": { + "KHR_draco_mesh_compression": { + "bufferView": 29, + "attributes": { + "NORMAL": 0, + "POSITION": 1 + } + } + } + } + ], + "name": "body_1" + }, + { + "primitives": [ + { + "attributes": { + "NORMAL": 91, + "POSITION": 92 + }, + "indices": 90, + "mode": 4, + "material": 30, + "extensions": { + "KHR_draco_mesh_compression": { + "bufferView": 30, + "attributes": { + "NORMAL": 0, + "POSITION": 1 + } + } + } + }, + { + "attributes": { + "NORMAL": 94, + "POSITION": 95 + }, + "indices": 93, + "mode": 4, + "material": 31, + "extensions": { + "KHR_draco_mesh_compression": { + "bufferView": 31, + "attributes": { + "NORMAL": 0, + "POSITION": 1 + } + } + } + } + ], + "name": "rod_123-699_0_Parts_1" + }, + { + "primitives": [ + { + "attributes": { + "NORMAL": 97, + "POSITION": 98 + }, + "indices": 96, + "mode": 4, + "material": 32, + "extensions": { + "KHR_draco_mesh_compression": { + 
"bufferView": 32, + "attributes": { + "NORMAL": 0, + "POSITION": 1 + } + } + } + }, + { + "attributes": { + "NORMAL": 100, + "POSITION": 101 + }, + "indices": 99, + "mode": 4, + "material": 33, + "extensions": { + "KHR_draco_mesh_compression": { + "bufferView": 33, + "attributes": { + "NORMAL": 0, + "POSITION": 1 + } + } + } + } + ], + "name": "Lifter_123-923_0_Parts_1" + } + ], + "accessors": [ + { + "componentType": 5123, + "count": 8250, + "max": [ + 2011 + ], + "min": [ + 0 + ], + "type": "SCALAR" + }, + { + "componentType": 5126, + "count": 2019, + "max": [ + 1.007843137254902, + 1.007843137254902, + 1.007843137254902 + ], + "min": [ + -1.007843137254902, + -1.007843137254902, + -1.007843137254902 + ], + "type": "VEC3" + }, + { + "componentType": 5126, + "count": 2019, + "max": [ + 63.40448572208197, + 56.08839293136558, + 46.562239366912465 + ], + "min": [ + -43.18272974063667, + -37.02586557437691, + -46.552019138830026 + ], + "type": "VEC3" + }, + { + "componentType": 5123, + "count": 5034, + "max": [ + 1295 + ], + "min": [ + 0 + ], + "type": "SCALAR" + }, + { + "componentType": 5126, + "count": 1296, + "max": [ + 1.007843137254902, + 1.007843137254902, + 1.0085841421987496 + ], + "min": [ + -1.007843137254902, + -1.007843137254902, + -1.0071021323110545 + ], + "type": "VEC3" + }, + { + "componentType": 5126, + "count": 1296, + "max": [ + 53.40421474812726, + 56.071585904462886, + 46.48897622932617 + ], + "min": [ + 31.823853243486322, + -37.01927877555665, + -46.51102377067383 + ], + "type": "VEC3" + }, + { + "componentType": 5123, + "count": 792, + "max": [ + 263 + ], + "min": [ + 0 + ], + "type": "SCALAR" + }, + { + "componentType": 5126, + "count": 264, + "max": [ + 1.007843137254902, + 1.0085841421987496, + 1.007843137254902 + ], + "min": [ + -1.007843137254902, + -1.0071021323110545, + -1.007843137254902 + ], + "type": "VEC3" + }, + { + "componentType": 5126, + "count": 264, + "max": [ + -242.44321321264732, + 107.60525894351395, + 85.04152418172936 + ], + "min": [ + -264.53407789266686, + 85.55591844522377, + -0.04152418172936004 + ], + "type": "VEC3" + }, + { + "componentType": 5123, + "count": 1530, + "max": [ + 669 + ], + "min": [ + 0 + ], + "type": "SCALAR" + }, + { + "componentType": 5126, + "count": 670, + "max": [ + 1.007843137254902, + 1.007843137254902, + 1.007843137254902 + ], + "min": [ + -1.007843137254902, + -1.007843137254902, + -1.007843137254902 + ], + "type": "VEC3" + }, + { + "componentType": 5126, + "count": 670, + "max": [ + 36.0150846938826, + 35.77556925980009, + 10.029517132302988 + ], + "min": [ + -36.0150846938826, + -36.008523414585724, + -15.035153816197054 + ], + "type": "VEC3" + }, + { + "componentType": 5123, + "count": 5640, + "max": [ + 1552 + ], + "min": [ + 0 + ], + "type": "SCALAR" + }, + { + "componentType": 5126, + "count": 1553, + "max": [ + 1.007843137254902, + 1.007843137254902, + 1.007843137254902 + ], + "min": [ + -1.007843137254902, + -1.007843137254902, + -1.007843137254902 + ], + "type": "VEC3" + }, + { + "componentType": 5126, + "count": 1553, + "max": [ + 63.07857825014145, + 35.09330727894479, + 113.6238397655105 + ], + "min": [ + -63.114142805061284, + -35.1238397655105, + -140.1238397655105 + ], + "type": "VEC3" + }, + { + "componentType": 5123, + "count": 7716, + "max": [ + 2469 + ], + "min": [ + 0 + ], + "type": "SCALAR" + }, + { + "componentType": 5126, + "count": 2470, + "max": [ + 1.007843137254902, + 1.007843137254902, + 1.007843137254902 + ], + "min": [ + -1.007843137254902, + -1.007843137254902, + -1.007843137254902 + 
], + "type": "VEC3" + }, + { + "componentType": 5126, + "count": 2470, + "max": [ + 69.04801049765925, + -31.053647338091963, + 10.071605568696185 + ], + "min": [ + -69.04801049765925, + -168.94747796592097, + -15.067396789163162 + ], + "type": "VEC3" + }, + { + "componentType": 5123, + "count": 4098, + "max": [ + 1381 + ], + "min": [ + 0 + ], + "type": "SCALAR" + }, + { + "componentType": 5126, + "count": 1382, + "max": [ + 1.007843137254902, + 1.007843137254902, + 1.007843137254902 + ], + "min": [ + -1.007843137254902, + -1.007843137254902, + -1.007843137254902 + ], + "type": "VEC3" + }, + { + "componentType": 5126, + "count": 1382, + "max": [ + 177.51338138775995, + -14.20547458143659, + 19.069568473767465 + ], + "min": [ + -76.2342905064123, + -55.691794778873245, + -19.19691791730097 + ], + "type": "VEC3" + }, + { + "componentType": 5123, + "count": 123, + "max": [ + 42 + ], + "min": [ + 0 + ], + "type": "SCALAR" + }, + { + "componentType": 5126, + "count": 43, + "max": [ + 0.9906806646608843, + 1.0070963205075736, + 0.01101498323328376 + ], + "min": [ + -1.0078373254514208, + -1.0070963205075734, + -1.0078373254514208 + ], + "type": "VEC3" + }, + { + "componentType": 5126, + "count": 43, + "max": [ + 131.15477841018057, + -35.545531343237386, + 5.002442598925256 + ], + "min": [ + 129.4474017614263, + -37.25290799199166, + -0.002442598925256473 + ], + "type": "VEC3" + }, + { + "componentType": 5123, + "count": 123, + "max": [ + 42 + ], + "min": [ + 0 + ], + "type": "SCALAR" + }, + { + "componentType": 5126, + "count": 43, + "max": [ + 1.0070963205075736, + 1.0088496505045428, + 0.01101498323328376 + ], + "min": [ + -1.0070963205075734, + -0.9896683396077623, + -1.0078373254514208 + ], + "type": "VEC3" + }, + { + "componentType": 5126, + "count": 43, + "max": [ + 131.1979414917052, + -61.503653814691305, + 5.002442598925256 + ], + "min": [ + 129.40507388056693, + -63.29652142582955, + -0.002442598925256473 + ], + "type": "VEC3" + }, + { + "componentType": 5123, + "count": 4842, + "max": [ + 869 + ], + "min": [ + 0 + ], + "type": "SCALAR" + }, + { + "componentType": 5126, + "count": 870, + "max": [ + 1.007843137254902, + 1.0085841421987496, + 1.007843137254902 + ], + "min": [ + -1.007843137254902, + -1.0071021323110545, + -1.007843137254902 + ], + "type": "VEC3" + }, + { + "componentType": 5126, + "count": 870, + "max": [ + 132.31635282932984, + -34.386640562682715, + 15.020499208466157 + ], + "min": [ + 128.28646279779014, + -64.4129848340458, + -0.014654145569235276 + ], + "type": "VEC3" + }, + { + "componentType": 5123, + "count": 870, + "max": [ + 293 + ], + "min": [ + 0 + ], + "type": "SCALAR" + }, + { + "componentType": 5126, + "count": 294, + "max": [ + 1.007843137254902, + 1.007843137254902, + 1.0085841421987496 + ], + "min": [ + -1.007843137254902, + -1.007843137254902, + -1.0071021323110545 + ], + "type": "VEC3" + }, + { + "componentType": 5126, + "count": 294, + "max": [ + 181.61274291738744, + 312.1717225425583, + 18.017225856249894 + ], + "min": [ + 71.50526092782741, + 276.06033752694856, + -18.040421979277088 + ], + "type": "VEC3" + }, + { + "componentType": 5123, + "count": 1860, + "max": [ + 623 + ], + "min": [ + 0 + ], + "type": "SCALAR" + }, + { + "componentType": 5126, + "count": 624, + "max": [ + 1.007843137254902, + 1.007843137254902, + 1.0085841421987496 + ], + "min": [ + -1.007843137254902, + -1.007843137254902, + -1.0071021323110545 + ], + "type": "VEC3" + }, + { + "componentType": 5126, + "count": 624, + "max": [ + 211.18447328091366, + 300.2211685217935, + 
15.996206279842227 + ], + "min": [ + 202.14881133660492, + 268.18990325555023, + -16.003793720157773 + ], + "type": "VEC3" + }, + { + "componentType": 5123, + "count": 1140, + "max": [ + 407 + ], + "min": [ + 0 + ], + "type": "SCALAR" + }, + { + "componentType": 5126, + "count": 408, + "max": [ + 1.007843137254902, + 1.007843137254902, + 1.007843137254902 + ], + "min": [ + -1.007843137254902, + -1.007843137254902, + -1.007843137254902 + ], + "type": "VEC3" + }, + { + "componentType": 5126, + "count": 408, + "max": [ + 54.5927169438275, + -38.58734297088489, + 15.041035661944308 + ], + "min": [ + -44.50400997361265, + -80.66355205735289, + -0.04836345872007816 + ], + "type": "VEC3" + }, + { + "componentType": 5123, + "count": 972, + "max": [ + 339 + ], + "min": [ + 0 + ], + "type": "SCALAR" + }, + { + "componentType": 5126, + "count": 340, + "max": [ + 1.007843137254902, + 1.007843137254902, + 1.007843137254902 + ], + "min": [ + -1.007843137254902, + -1.007843137254902, + -1.007843137254902 + ], + "type": "VEC3" + }, + { + "componentType": 5126, + "count": 340, + "max": [ + 54.5927169438275, + -48.557050929514446, + 15.041035661944308 + ], + "min": [ + -44.50400997361265, + -61.66354824265562, + -0.04836345872007816 + ], + "type": "VEC3" + }, + { + "componentType": 5123, + "count": 924, + "max": [ + 323 + ], + "min": [ + 0 + ], + "type": "SCALAR" + }, + { + "componentType": 5126, + "count": 324, + "max": [ + 1.007843137254902, + 1.007843137254902, + 1.007843137254902 + ], + "min": [ + -1.007843137254902, + -1.007843137254902, + -1.007843137254902 + ], + "type": "VEC3" + }, + { + "componentType": 5126, + "count": 324, + "max": [ + 30.590223082694763, + 217.20610309520697, + 0.03957010258915261 + ], + "min": [ + 15.514013996226764, + 202.12989400873897, + -81.03957010258915 + ], + "type": "VEC3" + }, + { + "componentType": 5123, + "count": 276, + "max": [ + 95 + ], + "min": [ + 0 + ], + "type": "SCALAR" + }, + { + "componentType": 5126, + "count": 96, + "max": [ + 1.007843137254902, + 1.0085841421987496, + 1.007843137254902 + ], + "min": [ + -1.007843137254902, + -1.0071021323110545, + -1.007843137254902 + ], + "type": "VEC3" + }, + { + "componentType": 5126, + "count": 96, + "max": [ + -36.645295939331355, + 469.697984852905, + 115.05618740840252 + ], + "min": [ + -49.735184448197465, + 456.60809634403887, + -0.056179779008009044 + ], + "type": "VEC3" + }, + { + "componentType": 5123, + "count": 1044, + "max": [ + 347 + ], + "min": [ + 0 + ], + "type": "SCALAR" + }, + { + "componentType": 5126, + "count": 348, + "max": [ + 1.007843137254902, + 1.0085841421987496, + 1.007843137254902 + ], + "min": [ + -1.007843137254902, + -1.0071021323110545, + -1.007843137254902 + ], + "type": "VEC3" + }, + { + "componentType": 5126, + "count": 348, + "max": [ + -278.36243962379683, + -55.606046437168914, + 65.03175378602833 + ], + "min": [ + -296.43034387391896, + -73.64219690126271, + -0.03175378602833415 + ], + "type": "VEC3" + }, + { + "componentType": 5123, + "count": 4956, + "max": [ + 1408 + ], + "min": [ + 0 + ], + "type": "SCALAR" + }, + { + "componentType": 5126, + "count": 1413, + "max": [ + 1.007843137254902, + 1.007843137254902, + 1.007843137254902 + ], + "min": [ + -1.007843137254902, + -1.007843137254902, + -1.007843137254902 + ], + "type": "VEC3" + }, + { + "componentType": 5126, + "count": 1413, + "max": [ + 105.10791280622533, + 60.12821201982295, + 75.09822780246203 + ], + "min": [ + -98.09043386335424, + -124.12875631208472, + 2.208630534457648 + ], + "type": "VEC3" + }, + { + 
"componentType": 5123, + "count": 8016, + "max": [ + 1939 + ], + "min": [ + 0 + ], + "type": "SCALAR" + }, + { + "componentType": 5126, + "count": 1942, + "max": [ + 1.007843137254902, + 1.007843137254902, + 1.007843137254902 + ], + "min": [ + -1.007843137254902, + -1.007843137254902, + -1.007843137254902 + ], + "type": "VEC3" + }, + { + "componentType": 5126, + "count": 1942, + "max": [ + 40.90842442589384, + 233.18429666620733, + 70.04825886669074 + ], + "min": [ + 19.24019328174675, + 214.74940959034052, + -28.834158982657552 + ], + "type": "VEC3" + }, + { + "componentType": 5123, + "count": 9060, + "max": [ + 1753 + ], + "min": [ + 0 + ], + "type": "SCALAR" + }, + { + "componentType": 5126, + "count": 1755, + "max": [ + 1.007843137254902, + 1.0078551177885018, + 1.007843137254902 + ], + "min": [ + -1.007843137254902, + -1.007831156721302, + -1.007843137254902 + ], + "type": "VEC3" + }, + { + "componentType": 5126, + "count": 1755, + "max": [ + -252.50384356000683, + -164.048902242313, + 85.04152418172936 + ], + "min": [ + -330.486256847745, + -199.21988416708095, + -0.04152418172936004 + ], + "type": "VEC3" + }, + { + "componentType": 5123, + "count": 9036, + "max": [ + 1748 + ], + "min": [ + 0 + ], + "type": "SCALAR" + }, + { + "componentType": 5126, + "count": 1750, + "max": [ + 1.007843137254902, + 1.0078551177885018, + 1.007843137254902 + ], + "min": [ + -1.007843137254902, + -1.007831156721302, + -1.007843137254902 + ], + "type": "VEC3" + }, + { + "componentType": 5126, + "count": 1750, + "max": [ + -248.42355181196, + -164.048902242313, + 85.04152418172936 + ], + "min": [ + -326.40596509969816, + -199.21988416708095, + -0.04152418172936004 + ], + "type": "VEC3" + }, + { + "componentType": 5123, + "count": 17556, + "max": [ + 5473 + ], + "min": [ + 0 + ], + "type": "SCALAR" + }, + { + "componentType": 5126, + "count": 5481, + "max": [ + 1.007843137254902, + 1.007843137254902, + 1.007843137254902 + ], + "min": [ + -1.007843137254902, + -1.007843137254902, + -1.007843137254902 + ], + "type": "VEC3" + }, + { + "componentType": 5126, + "count": 5481, + "max": [ + 113.11425593030935, + 59.855536590859444, + 0.1265035907873937 + ], + "min": [ + -103.09678461683279, + -120.79517176282887, + -82.49587061536795 + ], + "type": "VEC3" + }, + { + "componentType": 5123, + "count": 1080, + "max": [ + 371 + ], + "min": [ + 0 + ], + "type": "SCALAR" + }, + { + "componentType": 5126, + "count": 372, + "max": [ + 1.007843137254902, + 1.007843137254902, + 1.007843137254902 + ], + "min": [ + -1.007843137254902, + -1.007843137254902, + -1.007843137254902 + ], + "type": "VEC3" + }, + { + "componentType": 5126, + "count": 372, + "max": [ + 37.66338539496246, + 226.16108321770844, + 0.01221434663549914 + ], + "min": [ + 13.639934536067813, + 202.1610813103598, + -17.011724475773008 + ], + "type": "VEC3" + }, + { + "componentType": 5123, + "count": 672, + "max": [ + 239 + ], + "min": [ + 0 + ], + "type": "SCALAR" + }, + { + "componentType": 5126, + "count": 240, + "max": [ + 1.007843137254902, + 1.007843137254902, + 1.007843137254902 + ], + "min": [ + -1.007843137254902, + -1.007843137254902, + -1.007843137254902 + ], + "type": "VEC3" + }, + { + "componentType": 5126, + "count": 240, + "max": [ + -25.825387825194838, + 156.20804847317555, + 0.11235955801600994 + ], + "min": [ + -189.08382562246916, + -7.050389324098782, + -230.1123748168051 + ], + "type": "VEC3" + }, + { + "componentType": 5123, + "count": 4974, + "max": [ + 1413 + ], + "min": [ + 0 + ], + "type": "SCALAR" + }, + { + "componentType": 
5126, + "count": 1417, + "max": [ + 1.007843137254902, + 1.007843137254902, + 1.007843137254902 + ], + "min": [ + -1.007843137254902, + -1.007843137254902, + -1.007843137254902 + ], + "type": "VEC3" + }, + { + "componentType": 5126, + "count": 1417, + "max": [ + 105.10791280622533, + 60.12821201982295, + 147.79042774791185 + ], + "min": [ + -98.09043386335424, + -124.12875631208472, + 74.90083047990748 + ], + "type": "VEC3" + }, + { + "componentType": 5123, + "count": 17094, + "max": [ + 5363 + ], + "min": [ + 0 + ], + "type": "SCALAR" + }, + { + "componentType": 5126, + "count": 5370, + "max": [ + 1.007843137254902, + 1.007843137254902, + 1.007843137254902 + ], + "min": [ + -1.007843137254902, + -1.007843137254902, + -1.007843137254902 + ], + "type": "VEC3" + }, + { + "componentType": 5126, + "count": 5370, + "max": [ + 113.11425593030935, + 59.855536590859444, + 82.51410393259917 + ], + "min": [ + -103.09678461683279, + -120.79517176282887, + -0.10827027355617846 + ], + "type": "VEC3" + }, + { + "componentType": 5123, + "count": 564, + "max": [ + 191 + ], + "min": [ + 0 + ], + "type": "SCALAR" + }, + { + "componentType": 5126, + "count": 192, + "max": [ + 1.007843137254902, + 1.0085841421987496, + 1.007843137254902 + ], + "min": [ + -1.007843137254902, + -1.0071021323110545, + -1.007843137254902 + ], + "type": "VEC3" + }, + { + "componentType": 5126, + "count": 192, + "max": [ + -25.82539396188174, + 81.21283311941708, + 0.11235955801600994 + ], + "min": [ + -39.083821807771876, + 67.95440527352694, + -230.1123748168051 + ], + "type": "VEC3" + }, + { + "componentType": 5123, + "count": 27684, + "max": [ + 8617 + ], + "min": [ + 0 + ], + "type": "SCALAR" + }, + { + "componentType": 5126, + "count": 8618, + "max": [ + 1.007843137254902, + 1.007843137254902, + 1.007843137254902 + ], + "min": [ + -1.007843137254902, + -1.007843137254902, + -1.007843137254902 + ], + "type": "VEC3" + }, + { + "componentType": 5126, + "count": 8618, + "max": [ + 99.19769106530887, + 151.65165664419408, + 80.1123499066642 + ], + "min": [ + -62.028483884184325, + -38.53396941885229, + -80.09281875356908 + ], + "type": "VEC3" + }, + { + "componentType": 5123, + "count": 27684, + "max": [ + 8617 + ], + "min": [ + 0 + ], + "type": "SCALAR" + }, + { + "componentType": 5126, + "count": 8618, + "max": [ + 1.007843137254902, + 1.007843137254902, + 1.007843137254902 + ], + "min": [ + -1.007843137254902, + -1.007843137254902, + -1.007843137254902 + ], + "type": "VEC3" + }, + { + "componentType": 5126, + "count": 8618, + "max": [ + 99.19769106530887, + 151.65165664419408, + 80.1123499066642 + ], + "min": [ + -62.028483884184325, + -38.53396941885229, + -80.09281875356908 + ], + "type": "VEC3" + }, + { + "componentType": 5123, + "count": 14874, + "max": [ + 3671 + ], + "min": [ + 0 + ], + "type": "SCALAR" + }, + { + "componentType": 5126, + "count": 3677, + "max": [ + 1.007843137254902, + 1.007843137254902, + 1.007843137254902 + ], + "min": [ + -1.007843137254902, + -1.007843137254902, + -1.007843137254902 + ], + "type": "VEC3" + }, + { + "componentType": 5126, + "count": 3677, + "max": [ + 95.09045272998597, + 90.13237496708217, + 113.11231703168892 + ], + "min": [ + -95.13237496708216, + -181.10393258426967, + -120.13237496708216 + ], + "type": "VEC3" + }, + { + "componentType": 5123, + "count": 3903, + "max": [ + 1280 + ], + "min": [ + 0 + ], + "type": "SCALAR" + }, + { + "componentType": 5126, + "count": 1289, + "max": [ + 1.007843137254902, + 1.007843137254902, + 1.007843137254902 + ], + "min": [ + -1.007843137254902, 
+ -1.007843137254902, + -1.007843137254902 + ], + "type": "VEC3" + }, + { + "componentType": 5126, + "count": 1289, + "max": [ + 111.14250709746136, + 30.73910905421507, + 10.093795798729847 + ], + "min": [ + -71.0353141042973, + -43.41233207915083, + -10.088910600879336 + ], + "type": "VEC3" + }, + { + "componentType": 5123, + "count": 960, + "max": [ + 179 + ], + "min": [ + 0 + ], + "type": "SCALAR" + }, + { + "componentType": 5126, + "count": 180, + "max": [ + 1.007843137254902, + 1.0085841421987496, + 1.007843137254902 + ], + "min": [ + -1.007843137254902, + -1.0071021323110545, + -1.007843137254902 + ], + "type": "VEC3" + }, + { + "componentType": 5126, + "count": 180, + "max": [ + 76.1074352818812, + 2.5916090654523174, + 7.037701616068263 + ], + "min": [ + -11.853575761922698, + -15.223822649129728, + -7.042928751119475 + ], + "type": "VEC3" + }, + { + "componentType": 5123, + "count": 423, + "max": [ + 142 + ], + "min": [ + 0 + ], + "type": "SCALAR" + }, + { + "componentType": 5126, + "count": 143, + "max": [ + 1.007843137254902, + 1.0085841421987496, + 1.011764705882353 + ], + "min": [ + -1.007843137254902, + -1.0071021323110545, + -0.00784313725490196 + ], + "type": "VEC3" + }, + { + "componentType": 5126, + "count": 143, + "max": [ + -16.588934008994798, + -26.062883441367845, + 0.03972677911964695 + ], + "min": [ + -32.16183142389504, + -41.635780856268084, + -81.3604436370298 + ], + "type": "VEC3" + }, + { + "componentType": 5123, + "count": 2163, + "max": [ + 385 + ], + "min": [ + 0 + ], + "type": "SCALAR" + }, + { + "componentType": 5126, + "count": 386, + "max": [ + 0.782551365740159, + 0.7822283681701212, + -0.6225824215832878 + ], + "min": [ + -0.7819053706000832, + -0.7822283681701211, + -1.0060873802970438 + ], + "type": "VEC3" + }, + { + "componentType": 5126, + "count": 386, + "max": [ + -16.616233825683587, + -26.091251526102532, + -81.31242564601553 + ], + "min": [ + -32.123733367696296, + -41.60632308937599, + -85.00757202126074 + ], + "type": "VEC3" + } + ], + "materials": [ + { + "pbrMetallicRoughness": { + "baseColorFactor": [ + 0.8500000238418579, + 0.8500000238418579, + 0.8500000238418579, + 1 + ], + "metallicFactor": 0, + "roughnessFactor": 1 + }, + "emissiveFactor": [ + 0, + 0, + 0 + ], + "name": "Material_20", + "alphaMode": "OPAQUE", + "doubleSided": false + }, + { + "pbrMetallicRoughness": { + "baseColorFactor": [ + 0, + 0, + 0, + 1 + ], + "metallicFactor": 0, + "roughnessFactor": 1 + }, + "emissiveFactor": [ + 0, + 0, + 0 + ], + "name": "Material_21", + "alphaMode": "OPAQUE", + "doubleSided": false + }, + { + "pbrMetallicRoughness": { + "baseColorFactor": [ + 0.8500000238418579, + 0.5609999895095825, + 0, + 1 + ], + "metallicFactor": 0, + "roughnessFactor": 1 + }, + "emissiveFactor": [ + 0, + 0, + 0 + ], + "name": "Material_17", + "alphaMode": "OPAQUE", + "doubleSided": false + }, + { + "pbrMetallicRoughness": { + "baseColorFactor": [ + 0.8500000238418579, + 0.5609999895095825, + 0, + 1 + ], + "metallicFactor": 0, + "roughnessFactor": 1 + }, + "emissiveFactor": [ + 0, + 0, + 0 + ], + "name": "Material_17", + "alphaMode": "OPAQUE", + "doubleSided": false + }, + { + "pbrMetallicRoughness": { + "baseColorFactor": [ + 0, + 0.42500001192092896, + 0.42500001192092896, + 1 + ], + "metallicFactor": 0, + "roughnessFactor": 1 + }, + "emissiveFactor": [ + 0, + 0, + 0 + ], + "name": "Material_22", + "alphaMode": "OPAQUE", + "doubleSided": false + }, + { + "pbrMetallicRoughness": { + "baseColorFactor": [ + 0.8500000238418579, + 0.5609999895095825, + 0, + 1 + ], + 
"metallicFactor": 0, + "roughnessFactor": 1 + }, + "emissiveFactor": [ + 0, + 0, + 0 + ], + "name": "Material_17", + "alphaMode": "OPAQUE", + "doubleSided": false + }, + { + "pbrMetallicRoughness": { + "baseColorFactor": [ + 0, + 0.42500001192092896, + 0.42500001192092896, + 1 + ], + "metallicFactor": 0, + "roughnessFactor": 1 + }, + "emissiveFactor": [ + 0, + 0, + 0 + ], + "name": "Material_22", + "alphaMode": "OPAQUE", + "doubleSided": false + }, + { + "pbrMetallicRoughness": { + "baseColorFactor": [ + 0.8500000238418579, + 0.5609999895095825, + 0, + 1 + ], + "metallicFactor": 0, + "roughnessFactor": 1 + }, + "emissiveFactor": [ + 0, + 0, + 0 + ], + "name": "Material_17", + "alphaMode": "OPAQUE", + "doubleSided": false + }, + { + "pbrMetallicRoughness": { + "baseColorFactor": [ + 0.8500000238418579, + 0, + 0, + 1 + ], + "metallicFactor": 0, + "roughnessFactor": 1 + }, + "emissiveFactor": [ + 0, + 0, + 0 + ], + "name": "Material_18", + "alphaMode": "OPAQUE", + "doubleSided": false + }, + { + "pbrMetallicRoughness": { + "baseColorFactor": [ + 0, + 0.5609999895095825, + 0.8500000238418579, + 1 + ], + "metallicFactor": 0, + "roughnessFactor": 1 + }, + "emissiveFactor": [ + 0, + 0, + 0 + ], + "name": "Material_23", + "alphaMode": "OPAQUE", + "doubleSided": false + }, + { + "pbrMetallicRoughness": { + "baseColorFactor": [ + 0, + 0, + 0.8500000238418579, + 1 + ], + "metallicFactor": 0, + "roughnessFactor": 1 + }, + "emissiveFactor": [ + 0, + 0, + 0 + ], + "name": "Material_16", + "alphaMode": "OPAQUE", + "doubleSided": false + }, + { + "pbrMetallicRoughness": { + "baseColorFactor": [ + 0, + 0, + 0.8500000238418579, + 1 + ], + "metallicFactor": 0, + "roughnessFactor": 1 + }, + "emissiveFactor": [ + 0, + 0, + 0 + ], + "name": "Material_16", + "alphaMode": "OPAQUE", + "doubleSided": false + }, + { + "pbrMetallicRoughness": { + "baseColorFactor": [ + 0.8500000238418579, + 0.8500000238418579, + 0.8500000238418579, + 1 + ], + "metallicFactor": 0, + "roughnessFactor": 1 + }, + "emissiveFactor": [ + 0, + 0, + 0 + ], + "name": "Material_20", + "alphaMode": "OPAQUE", + "doubleSided": false + }, + { + "pbrMetallicRoughness": { + "baseColorFactor": [ + 0.8500000238418579, + 0.8500000238418579, + 0.8500000238418579, + 1 + ], + "metallicFactor": 0, + "roughnessFactor": 1 + }, + "emissiveFactor": [ + 0, + 0, + 0 + ], + "name": "Material_20", + "alphaMode": "OPAQUE", + "doubleSided": false + }, + { + "pbrMetallicRoughness": { + "baseColorFactor": [ + 0, + 0, + 0.8500000238418579, + 1 + ], + "metallicFactor": 0, + "roughnessFactor": 1 + }, + "emissiveFactor": [ + 0, + 0, + 0 + ], + "name": "Material_16", + "alphaMode": "OPAQUE", + "doubleSided": false + }, + { + "pbrMetallicRoughness": { + "baseColorFactor": [ + 0, + 0, + 0.8500000238418579, + 1 + ], + "metallicFactor": 0, + "roughnessFactor": 1 + }, + "emissiveFactor": [ + 0, + 0, + 0 + ], + "name": "Material_16", + "alphaMode": "OPAQUE", + "doubleSided": false + }, + { + "pbrMetallicRoughness": { + "baseColorFactor": [ + 0.8500000238418579, + 0.5609999895095825, + 0, + 1 + ], + "metallicFactor": 0, + "roughnessFactor": 1 + }, + "emissiveFactor": [ + 0, + 0, + 0 + ], + "name": "Material_17", + "alphaMode": "OPAQUE", + "doubleSided": false + }, + { + "pbrMetallicRoughness": { + "baseColorFactor": [ + 0, + 0.5609999895095825, + 0.8500000238418579, + 1 + ], + "metallicFactor": 0, + "roughnessFactor": 1 + }, + "emissiveFactor": [ + 0, + 0, + 0 + ], + "name": "Material_23", + "alphaMode": "OPAQUE", + "doubleSided": false + }, + { + "pbrMetallicRoughness": { + 
"baseColorFactor": [ + 0.8500000238418579, + 0.8500000238418579, + 0.8500000238418579, + 1 + ], + "metallicFactor": 0, + "roughnessFactor": 1 + }, + "emissiveFactor": [ + 0, + 0, + 0 + ], + "name": "Material_20", + "alphaMode": "OPAQUE", + "doubleSided": false + }, + { + "pbrMetallicRoughness": { + "baseColorFactor": [ + 0.8500000238418579, + 0.5609999895095825, + 0, + 1 + ], + "metallicFactor": 0, + "roughnessFactor": 1 + }, + "emissiveFactor": [ + 0, + 0, + 0 + ], + "name": "Material_17", + "alphaMode": "OPAQUE", + "doubleSided": false + }, + { + "pbrMetallicRoughness": { + "baseColorFactor": [ + 0.8500000238418579, + 0.5609999895095825, + 0, + 1 + ], + "metallicFactor": 0, + "roughnessFactor": 1 + }, + "emissiveFactor": [ + 0, + 0, + 0 + ], + "name": "Material_17", + "alphaMode": "OPAQUE", + "doubleSided": false + }, + { + "pbrMetallicRoughness": { + "baseColorFactor": [ + 0.8500000238418579, + 0.8500000238418579, + 0.8500000238418579, + 1 + ], + "metallicFactor": 0, + "roughnessFactor": 1 + }, + "emissiveFactor": [ + 0, + 0, + 0 + ], + "name": "Material_20", + "alphaMode": "OPAQUE", + "doubleSided": false + }, + { + "pbrMetallicRoughness": { + "baseColorFactor": [ + 0, + 0, + 0.8500000238418579, + 1 + ], + "metallicFactor": 0, + "roughnessFactor": 1 + }, + "emissiveFactor": [ + 0, + 0, + 0 + ], + "name": "Material_16", + "alphaMode": "OPAQUE", + "doubleSided": false + }, + { + "pbrMetallicRoughness": { + "baseColorFactor": [ + 0, + 0.42500001192092896, + 0.42500001192092896, + 1 + ], + "metallicFactor": 0, + "roughnessFactor": 1 + }, + "emissiveFactor": [ + 0, + 0, + 0 + ], + "name": "Material_22", + "alphaMode": "OPAQUE", + "doubleSided": false + }, + { + "pbrMetallicRoughness": { + "baseColorFactor": [ + 0, + 0.5609999895095825, + 0.8500000238418579, + 1 + ], + "metallicFactor": 0, + "roughnessFactor": 1 + }, + "emissiveFactor": [ + 0, + 0, + 0 + ], + "name": "Material_23", + "alphaMode": "OPAQUE", + "doubleSided": false + }, + { + "pbrMetallicRoughness": { + "baseColorFactor": [ + 0.8500000238418579, + 0.8500000238418579, + 0.8500000238418579, + 1 + ], + "metallicFactor": 0, + "roughnessFactor": 1 + }, + "emissiveFactor": [ + 0, + 0, + 0 + ], + "name": "Material_20", + "alphaMode": "OPAQUE", + "doubleSided": false + }, + { + "pbrMetallicRoughness": { + "baseColorFactor": [ + 0, + 0, + 0.8500000238418579, + 1 + ], + "metallicFactor": 0, + "roughnessFactor": 1 + }, + "emissiveFactor": [ + 0, + 0, + 0 + ], + "name": "Material_16", + "alphaMode": "OPAQUE", + "doubleSided": false + }, + { + "pbrMetallicRoughness": { + "baseColorFactor": [ + 0.8500000238418579, + 0.8500000238418579, + 0.8500000238418579, + 1 + ], + "metallicFactor": 0, + "roughnessFactor": 1 + }, + "emissiveFactor": [ + 0, + 0, + 0 + ], + "name": "Material_20", + "alphaMode": "OPAQUE", + "doubleSided": false + }, + { + "pbrMetallicRoughness": { + "baseColorFactor": [ + 0.8500000238418579, + 0.8500000238418579, + 0.8500000238418579, + 1 + ], + "metallicFactor": 0, + "roughnessFactor": 1 + }, + "emissiveFactor": [ + 0, + 0, + 0 + ], + "name": "Material_20", + "alphaMode": "OPAQUE", + "doubleSided": false + }, + { + "pbrMetallicRoughness": { + "baseColorFactor": [ + 0.8500000238418579, + 0.8500000238418579, + 0.8500000238418579, + 1 + ], + "metallicFactor": 0, + "roughnessFactor": 1 + }, + "emissiveFactor": [ + 0, + 0, + 0 + ], + "name": "Material_20", + "alphaMode": "OPAQUE", + "doubleSided": false + }, + { + "pbrMetallicRoughness": { + "baseColorFactor": [ + 0.8500000238418579, + 0.8500000238418579, + 0.8500000238418579, + 
1 + ], + "metallicFactor": 0, + "roughnessFactor": 1 + }, + "emissiveFactor": [ + 0, + 0, + 0 + ], + "name": "Material_19", + "alphaMode": "OPAQUE", + "doubleSided": false + }, + { + "pbrMetallicRoughness": { + "baseColorFactor": [ + 0.8500000238418579, + 0.8500000238418579, + 0.8500000238418579, + 1 + ], + "metallicFactor": 0, + "roughnessFactor": 1 + }, + "emissiveFactor": [ + 0, + 0, + 0 + ], + "name": "Material_20", + "alphaMode": "OPAQUE", + "doubleSided": false + }, + { + "pbrMetallicRoughness": { + "baseColorFactor": [ + 0.8500000238418579, + 0.5609999895095825, + 0, + 1 + ], + "metallicFactor": 0, + "roughnessFactor": 1 + }, + "emissiveFactor": [ + 0, + 0, + 0 + ], + "name": "Material_17", + "alphaMode": "OPAQUE", + "doubleSided": false + }, + { + "pbrMetallicRoughness": { + "baseColorFactor": [ + 0.8500000238418579, + 0, + 0, + 1 + ], + "metallicFactor": 0, + "roughnessFactor": 1 + }, + "emissiveFactor": [ + 0, + 0, + 0 + ], + "name": "Material_18", + "alphaMode": "OPAQUE", + "doubleSided": false + } + ], + "bufferViews": [ + { + "buffer": 0, + "byteOffset": 0, + "byteLength": 7048 + }, + { + "buffer": 0, + "byteOffset": 7048, + "byteLength": 3418 + }, + { + "buffer": 0, + "byteOffset": 10468, + "byteLength": 749 + }, + { + "buffer": 0, + "byteOffset": 11220, + "byteLength": 1576 + }, + { + "buffer": 0, + "byteOffset": 12796, + "byteLength": 4477 + }, + { + "buffer": 0, + "byteOffset": 17276, + "byteLength": 6111 + }, + { + "buffer": 0, + "byteOffset": 23388, + "byteLength": 3514 + }, + { + "buffer": 0, + "byteOffset": 26904, + "byteLength": 296 + }, + { + "buffer": 0, + "byteOffset": 27200, + "byteLength": 298 + }, + { + "buffer": 0, + "byteOffset": 27500, + "byteLength": 2063 + }, + { + "buffer": 0, + "byteOffset": 29564, + "byteLength": 879 + }, + { + "buffer": 0, + "byteOffset": 30444, + "byteLength": 1716 + }, + { + "buffer": 0, + "byteOffset": 32160, + "byteLength": 1119 + }, + { + "buffer": 0, + "byteOffset": 33280, + "byteLength": 936 + }, + { + "buffer": 0, + "byteOffset": 34216, + "byteLength": 839 + }, + { + "buffer": 0, + "byteOffset": 35056, + "byteLength": 352 + }, + { + "buffer": 0, + "byteOffset": 35408, + "byteLength": 984 + }, + { + "buffer": 0, + "byteOffset": 36392, + "byteLength": 4746 + }, + { + "buffer": 0, + "byteOffset": 41140, + "byteLength": 4936 + }, + { + "buffer": 0, + "byteOffset": 46076, + "byteLength": 6002 + }, + { + "buffer": 0, + "byteOffset": 52080, + "byteLength": 5976 + }, + { + "buffer": 0, + "byteOffset": 58056, + "byteLength": 14759 + }, + { + "buffer": 0, + "byteOffset": 72816, + "byteLength": 1052 + }, + { + "buffer": 0, + "byteOffset": 73868, + "byteLength": 690 + }, + { + "buffer": 0, + "byteOffset": 74560, + "byteLength": 4754 + }, + { + "buffer": 0, + "byteOffset": 79316, + "byteLength": 14406 + }, + { + "buffer": 0, + "byteOffset": 93724, + "byteLength": 494 + }, + { + "buffer": 0, + "byteOffset": 94220, + "byteLength": 21649 + }, + { + "buffer": 0, + "byteOffset": 115872, + "byteLength": 21709 + }, + { + "buffer": 0, + "byteOffset": 137584, + "byteLength": 11596 + }, + { + "buffer": 0, + "byteOffset": 149180, + "byteLength": 3313 + }, + { + "buffer": 0, + "byteOffset": 152496, + "byteLength": 936 + }, + { + "buffer": 0, + "byteOffset": 153432, + "byteLength": 419 + }, + { + "buffer": 0, + "byteOffset": 153852, + "byteLength": 1540 + } + ], + "buffers": [ + { + "name": "2CylinderEngine", + "byteLength": 155392, + "uri": "2CylinderEngine.bin" + } + ], + "extensionsRequired": [ + "KHR_draco_mesh_compression" + ], + "extensionsUsed": [ 
+ "KHR_draco_mesh_compression" + ] +} From deac3a9f6c6a4417144586ed28a1f474fea97984 Mon Sep 17 00:00:00 2001 From: RichardTea <31507749+RichardTea@users.noreply.github.com> Date: Wed, 20 Jan 2021 10:14:20 +0000 Subject: [PATCH 02/30] Add Draco 1.4.1 sources Upstream: https://github.com/google/draco --- contrib/draco/.clang-format | 5 + contrib/draco/.cmake-format.py | 102 ++ contrib/draco/.gitignore | 1 + contrib/draco/.ruby-version | 1 + contrib/draco/.travis.yml | 31 + contrib/draco/AUTHORS | 7 + contrib/draco/BUILDING.md | 301 ++++ contrib/draco/CMAKE.md | 106 ++ contrib/draco/CMakeLists.txt | 958 +++++++++++++ contrib/draco/CONTRIBUTING.md | 27 + contrib/draco/LICENSE | 252 ++++ contrib/draco/README.md | 478 +++++++ contrib/draco/cmake/DracoConfig.cmake | 3 + contrib/draco/cmake/FindDraco.cmake | 56 + contrib/draco/cmake/compiler_flags.cmake | 220 +++ contrib/draco/cmake/compiler_tests.cmake | 103 ++ .../draco/cmake/draco-config.cmake.template | 2 + contrib/draco/cmake/draco.pc.template | 11 + .../draco/cmake/draco_build_definitions.cmake | 117 ++ contrib/draco/cmake/draco_cpu_detection.cmake | 28 + contrib/draco/cmake/draco_emscripten.cmake | 185 +++ contrib/draco/cmake/draco_features.cmake | 63 + contrib/draco/cmake/draco_flags.cmake | 238 ++++ contrib/draco/cmake/draco_helpers.cmake | 110 ++ contrib/draco/cmake/draco_install.cmake | 79 ++ contrib/draco/cmake/draco_intrinsics.cmake | 96 ++ contrib/draco/cmake/draco_options.cmake | 239 ++++ contrib/draco/cmake/draco_sanitizer.cmake | 32 + contrib/draco/cmake/draco_targets.cmake | 349 +++++ contrib/draco/cmake/draco_test_config.h.cmake | 13 + contrib/draco/cmake/draco_tests.cmake | 133 ++ contrib/draco/cmake/draco_variables.cmake | 64 + contrib/draco/cmake/sanitizers.cmake | 19 + .../cmake/toolchains/aarch64-linux-gnu.cmake | 14 + .../cmake/toolchains/android-ndk-common.cmake | 23 + contrib/draco/cmake/toolchains/android.cmake | 39 + .../cmake/toolchains/arm-ios-common.cmake | 17 + .../toolchains/arm-linux-gnueabihf.cmake | 15 + .../toolchains/arm64-android-ndk-libcpp.cmake | 16 + .../draco/cmake/toolchains/arm64-ios.cmake | 14 + .../cmake/toolchains/arm64-linux-gcc.cmake | 18 + .../toolchains/armv7-android-ndk-libcpp.cmake | 16 + .../draco/cmake/toolchains/armv7-ios.cmake | 14 + .../cmake/toolchains/armv7-linux-gcc.cmake | 24 + .../draco/cmake/toolchains/armv7s-ios.cmake | 14 + contrib/draco/cmake/toolchains/i386-ios.cmake | 15 + .../toolchains/x86-android-ndk-libcpp.cmake | 16 + .../x86_64-android-ndk-libcpp.cmake | 16 + .../draco/cmake/toolchains/x86_64-ios.cmake | 15 + contrib/draco/cmake/util.cmake | 79 ++ .../src/draco/animation/keyframe_animation.cc | 54 + .../src/draco/animation/keyframe_animation.h | 107 ++ .../animation/keyframe_animation_decoder.cc | 30 + .../animation/keyframe_animation_decoder.h | 34 + .../animation/keyframe_animation_encoder.cc | 28 + .../animation/keyframe_animation_encoder.h | 39 + .../keyframe_animation_encoding_test.cc | 168 +++ .../animation/keyframe_animation_test.cc | 102 ++ .../attribute_octahedron_transform.cc | 145 ++ .../attribute_octahedron_transform.h | 81 ++ .../attribute_quantization_transform.cc | 260 ++++ .../attribute_quantization_transform.h | 102 ++ .../draco/attributes/attribute_transform.cc | 40 + .../draco/attributes/attribute_transform.h | 76 + .../attributes/attribute_transform_data.h | 71 + .../attributes/attribute_transform_type.h | 30 + .../draco/attributes/geometry_attribute.cc | 102 ++ .../src/draco/attributes/geometry_attribute.h | 350 +++++ 
.../src/draco/attributes/geometry_indices.h | 54 + .../src/draco/attributes/point_attribute.cc | 225 +++ .../src/draco/attributes/point_attribute.h | 190 +++ .../draco/attributes/point_attribute_test.cc | 128 ++ .../attributes/attributes_decoder.cc | 127 ++ .../attributes/attributes_decoder.h | 97 ++ .../attributes/attributes_decoder_interface.h | 62 + .../attributes/attributes_encoder.cc | 49 + .../attributes/attributes_encoder.h | 154 +++ .../attributes/kd_tree_attributes_decoder.cc | 556 ++++++++ .../attributes/kd_tree_attributes_decoder.h | 46 + .../attributes/kd_tree_attributes_encoder.cc | 305 ++++ .../attributes/kd_tree_attributes_encoder.h | 51 + .../attributes/kd_tree_attributes_shared.h | 28 + .../compression/attributes/linear_sequencer.h | 51 + .../mesh_attribute_indices_encoding_data.h | 58 + .../attributes/normal_compression_utils.h | 360 +++++ .../compression/attributes/point_d_vector.h | 279 ++++ .../attributes/point_d_vector_test.cc | 360 +++++ .../compression/attributes/points_sequencer.h | 63 + ..._constrained_multi_parallelogram_decoder.h | 231 ++++ ..._constrained_multi_parallelogram_encoder.h | 414 ++++++ ...e_constrained_multi_parallelogram_shared.h | 34 + .../mesh_prediction_scheme_data.h | 72 + .../mesh_prediction_scheme_decoder.h | 46 + .../mesh_prediction_scheme_encoder.h | 46 + ...ediction_scheme_geometric_normal_decoder.h | 172 +++ ...ediction_scheme_geometric_normal_encoder.h | 180 +++ ...n_scheme_geometric_normal_predictor_area.h | 117 ++ ...n_scheme_geometric_normal_predictor_base.h | 96 ++ ...ction_scheme_multi_parallelogram_decoder.h | 126 ++ ...ction_scheme_multi_parallelogram_encoder.h | 133 ++ ..._prediction_scheme_parallelogram_decoder.h | 98 ++ ..._prediction_scheme_parallelogram_encoder.h | 111 ++ ...h_prediction_scheme_parallelogram_shared.h | 78 ++ ...esh_prediction_scheme_tex_coords_decoder.h | 344 +++++ ...esh_prediction_scheme_tex_coords_encoder.h | 318 +++++ ...ction_scheme_tex_coords_portable_decoder.h | 143 ++ ...ction_scheme_tex_coords_portable_encoder.h | 133 ++ ...ion_scheme_tex_coords_portable_predictor.h | 263 ++++ .../prediction_scheme_decoder.h | 90 ++ .../prediction_scheme_decoder_factory.h | 194 +++ .../prediction_scheme_decoder_interface.h | 53 + .../prediction_scheme_decoding_transform.h | 65 + .../prediction_scheme_delta_decoder.h | 65 + .../prediction_scheme_delta_encoder.h | 69 + .../prediction_scheme_encoder.h | 90 ++ .../prediction_scheme_encoder_factory.cc | 85 ++ .../prediction_scheme_encoder_factory.h | 129 ++ .../prediction_scheme_encoder_interface.h | 55 + .../prediction_scheme_encoding_transform.h | 77 ++ .../prediction_scheme_factory.h | 85 ++ .../prediction_scheme_interface.h | 60 + ...ahedron_canonicalized_decoding_transform.h | 118 ++ ...ahedron_canonicalized_encoding_transform.h | 116 ++ ..._octahedron_canonicalized_transform_base.h | 102 ++ ...octahedron_canonicalized_transform_test.cc | 192 +++ ...eme_normal_octahedron_decoding_transform.h | 103 ++ ...eme_normal_octahedron_encoding_transform.h | 105 ++ ..._scheme_normal_octahedron_transform_base.h | 90 ++ ...scheme_normal_octahedron_transform_test.cc | 71 + ...rediction_scheme_wrap_decoding_transform.h | 88 ++ ...rediction_scheme_wrap_encoding_transform.h | 81 ++ .../prediction_scheme_wrap_transform_base.h | 120 ++ .../sequential_attribute_decoder.cc | 118 ++ .../attributes/sequential_attribute_decoder.h | 86 ++ ...equential_attribute_decoders_controller.cc | 149 ++ ...sequential_attribute_decoders_controller.h | 61 + .../sequential_attribute_encoder.cc | 108 ++ 
.../attributes/sequential_attribute_encoder.h | 134 ++ ...equential_attribute_encoders_controller.cc | 159 +++ ...sequential_attribute_encoders_controller.h | 115 ++ .../sequential_integer_attribute_decoder.cc | 240 ++++ .../sequential_integer_attribute_decoder.h | 76 + .../sequential_integer_attribute_encoder.cc | 233 ++++ .../sequential_integer_attribute_encoder.h | 67 + ...uential_integer_attribute_encoding_test.cc | 64 + .../sequential_normal_attribute_decoder.cc | 76 + .../sequential_normal_attribute_decoder.h | 83 ++ .../sequential_normal_attribute_encoder.cc | 57 + .../sequential_normal_attribute_encoder.h | 82 ++ ...quential_quantization_attribute_decoder.cc | 88 ++ ...equential_quantization_attribute_decoder.h | 52 + ...quential_quantization_attribute_encoder.cc | 86 ++ ...equential_quantization_attribute_encoder.h | 52 + .../adaptive_rans_bit_coding_shared.h | 43 + .../bit_coders/adaptive_rans_bit_decoder.cc | 70 + .../bit_coders/adaptive_rans_bit_decoder.h | 54 + .../bit_coders/adaptive_rans_bit_encoder.cc | 59 + .../bit_coders/adaptive_rans_bit_encoder.h | 61 + .../bit_coders/direct_bit_decoder.cc | 54 + .../bit_coders/direct_bit_decoder.h | 90 ++ .../bit_coders/direct_bit_encoder.cc | 39 + .../bit_coders/direct_bit_encoder.h | 89 ++ .../bit_coders/folded_integer_bit_decoder.h | 77 ++ .../bit_coders/folded_integer_bit_encoder.h | 82 ++ .../bit_coders/rans_bit_decoder.cc | 82 ++ .../compression/bit_coders/rans_bit_decoder.h | 55 + .../bit_coders/rans_bit_encoder.cc | 125 ++ .../compression/bit_coders/rans_bit_encoder.h | 57 + .../bit_coders/rans_coding_test.cc | 9 + .../bit_coders/symbol_bit_decoder.cc | 49 + .../bit_coders/symbol_bit_decoder.h | 36 + .../bit_coders/symbol_bit_encoder.cc | 30 + .../bit_coders/symbol_bit_encoder.h | 36 + .../compression/config/compression_shared.h | 155 +++ .../compression/config/decoder_options.h | 34 + .../config/decoder_options_test.cc | 67 + .../draco/compression/config/draco_options.h | 249 ++++ .../compression/config/encoder_options.h | 97 ++ .../compression/config/encoding_features.h | 39 + contrib/draco/src/draco/compression/decode.cc | 135 ++ contrib/draco/src/draco/compression/decode.h | 80 ++ .../src/draco/compression/decode_test.cc | 169 +++ contrib/draco/src/draco/compression/encode.cc | 96 ++ contrib/draco/src/draco/compression/encode.h | 140 ++ .../draco/src/draco/compression/encode_base.h | 131 ++ .../src/draco/compression/encode_test.cc | 407 ++++++ .../draco/src/draco/compression/entropy/ans.h | 527 +++++++ .../compression/entropy/rans_symbol_coding.h | 53 + .../compression/entropy/rans_symbol_decoder.h | 164 +++ .../compression/entropy/rans_symbol_encoder.h | 290 ++++ .../compression/entropy/shannon_entropy.cc | 147 ++ .../compression/entropy/shannon_entropy.h | 110 ++ .../entropy/shannon_entropy_test.cc | 58 + .../compression/entropy/symbol_coding_test.cc | 170 +++ .../compression/entropy/symbol_decoding.cc | 181 +++ .../compression/entropy/symbol_decoding.h | 29 + .../compression/entropy/symbol_encoding.cc | 376 +++++ .../compression/entropy/symbol_encoding.h | 47 + .../src/draco/compression/expert_encode.cc | 182 +++ .../src/draco/compression/expert_encode.h | 147 ++ .../draco/compression/mesh/mesh_decoder.cc | 37 + .../src/draco/compression/mesh/mesh_decoder.h | 68 + .../mesh/mesh_edgebreaker_decoder.cc | 70 + .../mesh/mesh_edgebreaker_decoder.h | 54 + .../mesh/mesh_edgebreaker_decoder_impl.cc | 1231 +++++++++++++++++ .../mesh/mesh_edgebreaker_decoder_impl.h | 228 +++ .../mesh_edgebreaker_decoder_impl_interface.h | 47 + 
.../mesh/mesh_edgebreaker_encoder.cc | 195 +++ .../mesh/mesh_edgebreaker_encoder.h | 73 + .../mesh/mesh_edgebreaker_encoder_impl.cc | 854 ++++++++++++ .../mesh/mesh_edgebreaker_encoder_impl.h | 210 +++ .../mesh_edgebreaker_encoder_impl_interface.h | 57 + .../mesh/mesh_edgebreaker_encoding_test.cc | 247 ++++ .../mesh/mesh_edgebreaker_shared.h | 131 ++ .../mesh/mesh_edgebreaker_traversal_decoder.h | 201 +++ .../mesh/mesh_edgebreaker_traversal_encoder.h | 139 ++ ...edgebreaker_traversal_predictive_decoder.h | 134 ++ ...edgebreaker_traversal_predictive_encoder.h | 172 +++ ...sh_edgebreaker_traversal_valence_decoder.h | 215 +++ ...sh_edgebreaker_traversal_valence_encoder.h | 226 +++ .../draco/compression/mesh/mesh_encoder.cc | 34 + .../src/draco/compression/mesh/mesh_encoder.h | 84 ++ .../compression/mesh/mesh_encoder_test.cc | 116 ++ .../mesh/mesh_sequential_decoder.cc | 169 +++ .../mesh/mesh_sequential_decoder.h | 39 + .../mesh/mesh_sequential_encoder.cc | 132 ++ .../mesh/mesh_sequential_encoder.h | 57 + .../mesh/traverser/depth_first_traverser.h | 172 +++ .../max_prediction_degree_traverser.h | 226 +++ ...mesh_attribute_indices_encoding_observer.h | 76 + .../mesh/traverser/mesh_traversal_sequencer.h | 113 ++ .../mesh/traverser/traverser_base.h | 87 ++ .../dynamic_integer_points_kd_tree_decoder.cc | 26 + .../dynamic_integer_points_kd_tree_decoder.h | 330 +++++ .../dynamic_integer_points_kd_tree_encoder.cc | 26 + .../dynamic_integer_points_kd_tree_encoder.h | 371 +++++ .../algorithms/float_points_tree_decoder.cc | 152 ++ .../algorithms/float_points_tree_decoder.h | 141 ++ .../algorithms/float_points_tree_encoder.cc | 94 ++ .../algorithms/float_points_tree_encoder.h | 126 ++ .../integer_points_kd_tree_decoder.cc | 45 + .../integer_points_kd_tree_decoder.h | 314 +++++ .../integer_points_kd_tree_encoder.cc | 45 + .../integer_points_kd_tree_encoder.h | 404 ++++++ .../point_cloud_compression_method.h | 34 + .../algorithms/point_cloud_types.h | 76 + .../algorithms/quantize_points_3.h | 84 ++ .../point_cloud/algorithms/queuing_policy.h | 75 + .../point_cloud/point_cloud_decoder.cc | 199 +++ .../point_cloud/point_cloud_decoder.h | 118 ++ .../point_cloud/point_cloud_encoder.cc | 306 ++++ .../point_cloud/point_cloud_encoder.h | 158 +++ .../point_cloud_kd_tree_decoder.cc | 40 + .../point_cloud/point_cloud_kd_tree_decoder.h | 31 + .../point_cloud_kd_tree_encoder.cc | 43 + .../point_cloud/point_cloud_kd_tree_encoder.h | 45 + .../point_cloud_kd_tree_encoding_test.cc | 458 ++++++ .../point_cloud_sequential_decoder.cc | 42 + .../point_cloud_sequential_decoder.h | 33 + .../point_cloud_sequential_encoder.cc | 49 + .../point_cloud_sequential_encoder.h | 43 + .../point_cloud_sequential_encoding_test.cc | 92 ++ contrib/draco/src/draco/core/bit_utils.cc | 36 + contrib/draco/src/draco/core/bit_utils.h | 124 ++ contrib/draco/src/draco/core/bounding_box.cc | 30 + contrib/draco/src/draco/core/bounding_box.h | 72 + .../src/draco/core/buffer_bit_coding_test.cc | 115 ++ contrib/draco/src/draco/core/cycle_timer.cc | 49 + contrib/draco/src/draco/core/cycle_timer.h | 50 + contrib/draco/src/draco/core/data_buffer.cc | 61 + contrib/draco/src/draco/core/data_buffer.h | 82 ++ .../draco/src/draco/core/decoder_buffer.cc | 72 + contrib/draco/src/draco/core/decoder_buffer.h | 216 +++ contrib/draco/src/draco/core/divide.cc | 88 ++ contrib/draco/src/draco/core/divide.h | 42 + .../draco/src/draco/core/draco_index_type.h | 183 +++ .../src/draco/core/draco_index_type_vector.h | 83 ++ .../draco/src/draco/core/draco_test_base.h | 11 + 
.../draco/src/draco/core/draco_test_utils.cc | 80 ++ .../draco/src/draco/core/draco_test_utils.h | 93 ++ contrib/draco/src/draco/core/draco_types.cc | 61 + contrib/draco/src/draco/core/draco_types.h | 52 + contrib/draco/src/draco/core/draco_version.h | 27 + .../draco/src/draco/core/encoder_buffer.cc | 93 ++ contrib/draco/src/draco/core/encoder_buffer.h | 152 ++ contrib/draco/src/draco/core/hash_utils.cc | 58 + contrib/draco/src/draco/core/hash_utils.h | 64 + contrib/draco/src/draco/core/macros.h | 119 ++ contrib/draco/src/draco/core/math_utils.h | 55 + .../draco/src/draco/core/math_utils_test.cc | 22 + contrib/draco/src/draco/core/options.cc | 94 ++ contrib/draco/src/draco/core/options.h | 150 ++ .../src/draco/core/quantization_utils.cc | 42 + .../draco/src/draco/core/quantization_utils.h | 82 ++ .../src/draco/core/quantization_utils_test.cc | 91 ++ contrib/draco/src/draco/core/status.h | 77 ++ contrib/draco/src/draco/core/status_or.h | 81 ++ contrib/draco/src/draco/core/status_test.cc | 38 + .../draco/src/draco/core/varint_decoding.h | 81 ++ .../draco/src/draco/core/varint_encoding.h | 61 + contrib/draco/src/draco/core/vector_d.h | 355 +++++ contrib/draco/src/draco/core/vector_d_test.cc | 306 ++++ .../draco/src/draco/io/file_reader_factory.cc | 45 + .../draco/src/draco/io/file_reader_factory.h | 34 + .../src/draco/io/file_reader_factory_test.cc | 85 ++ .../src/draco/io/file_reader_interface.h | 32 + .../src/draco/io/file_reader_test_common.h | 13 + contrib/draco/src/draco/io/file_utils.cc | 110 ++ contrib/draco/src/draco/io/file_utils.h | 73 + contrib/draco/src/draco/io/file_utils_test.cc | 69 + .../draco/src/draco/io/file_writer_factory.cc | 45 + .../draco/src/draco/io/file_writer_factory.h | 34 + .../src/draco/io/file_writer_factory_test.cc | 70 + .../src/draco/io/file_writer_interface.h | 26 + .../draco/src/draco/io/file_writer_utils.cc | 57 + .../draco/src/draco/io/file_writer_utils.h | 38 + contrib/draco/src/draco/io/mesh_io.cc | 87 ++ contrib/draco/src/draco/io/mesh_io.h | 107 ++ contrib/draco/src/draco/io/obj_decoder.cc | 708 ++++++++++ contrib/draco/src/draco/io/obj_decoder.h | 129 ++ .../draco/src/draco/io/obj_decoder_test.cc | 193 +++ contrib/draco/src/draco/io/obj_encoder.cc | 346 +++++ contrib/draco/src/draco/io/obj_encoder.h | 92 ++ .../draco/src/draco/io/obj_encoder_test.cc | 110 ++ contrib/draco/src/draco/io/parser_utils.cc | 260 ++++ contrib/draco/src/draco/io/parser_utils.h | 66 + contrib/draco/src/draco/io/ply_decoder.cc | 320 +++++ contrib/draco/src/draco/io/ply_decoder.h | 69 + .../draco/src/draco/io/ply_decoder_test.cc | 93 ++ contrib/draco/src/draco/io/ply_encoder.cc | 211 +++ contrib/draco/src/draco/io/ply_encoder.h | 54 + .../draco/src/draco/io/ply_property_reader.h | 96 ++ .../draco/src/draco/io/ply_property_writer.h | 94 ++ contrib/draco/src/draco/io/ply_reader.cc | 312 +++++ contrib/draco/src/draco/io/ply_reader.h | 155 +++ contrib/draco/src/draco/io/ply_reader_test.cc | 143 ++ contrib/draco/src/draco/io/point_cloud_io.cc | 58 + contrib/draco/src/draco/io/point_cloud_io.h | 89 ++ .../draco/src/draco/io/point_cloud_io_test.cc | 115 ++ .../draco/src/draco/io/stdio_file_reader.cc | 96 ++ .../draco/src/draco/io/stdio_file_reader.h | 48 + .../src/draco/io/stdio_file_reader_test.cc | 49 + .../draco/src/draco/io/stdio_file_writer.cc | 59 + .../draco/src/draco/io/stdio_file_writer.h | 42 + .../src/draco/io/stdio_file_writer_test.cc | 38 + .../animation_decoder_webidl_wrapper.cc | 101 ++ .../animation_decoder_webidl_wrapper.h | 73 + 
.../animation_encoder_webidl_wrapper.cc | 89 ++ .../animation_encoder_webidl_wrapper.h | 66 + .../emscripten/decoder_functions.js | 33 + .../emscripten/decoder_webidl_wrapper.cc | 363 +++++ .../emscripten/decoder_webidl_wrapper.h | 330 +++++ .../draco_animation_decoder_glue_wrapper.cc | 28 + .../draco_animation_encoder_glue_wrapper.cc | 25 + .../draco_animation_web_decoder.idl | 52 + .../draco_animation_web_encoder.idl | 34 + .../emscripten/draco_decoder_glue_wrapper.cc | 28 + .../emscripten/draco_encoder_glue_wrapper.cc | 25 + .../emscripten/draco_web_decoder.idl | 283 ++++ .../emscripten/draco_web_encoder.idl | 208 +++ .../emscripten/encoder_webidl_wrapper.cc | 359 +++++ .../emscripten/encoder_webidl_wrapper.h | 186 +++ .../draco/javascript/emscripten/finalize.js | 22 + .../javascript/emscripten/prepareCallbacks.js | 38 + .../draco/javascript/emscripten/version.js | 29 + .../draco/src/draco/maya/draco_maya_plugin.cc | 265 ++++ .../draco/src/draco/maya/draco_maya_plugin.h | 81 ++ contrib/draco/src/draco/mesh/corner_table.cc | 441 ++++++ contrib/draco/src/draco/mesh/corner_table.h | 396 ++++++ .../src/draco/mesh/corner_table_iterators.h | 289 ++++ contrib/draco/src/draco/mesh/mesh.cc | 40 + contrib/draco/src/draco/mesh/mesh.h | 152 ++ .../src/draco/mesh/mesh_are_equivalent.cc | 205 +++ .../src/draco/mesh/mesh_are_equivalent.h | 71 + .../draco/mesh/mesh_are_equivalent_test.cc | 98 ++ .../draco/mesh/mesh_attribute_corner_table.cc | 211 +++ .../draco/mesh/mesh_attribute_corner_table.h | 196 +++ contrib/draco/src/draco/mesh/mesh_cleanup.cc | 251 ++++ contrib/draco/src/draco/mesh/mesh_cleanup.h | 65 + .../draco/src/draco/mesh/mesh_cleanup_test.cc | 192 +++ .../src/draco/mesh/mesh_misc_functions.cc | 63 + .../src/draco/mesh/mesh_misc_functions.h | 98 ++ .../draco/src/draco/mesh/mesh_stripifier.cc | 102 ++ .../draco/src/draco/mesh/mesh_stripifier.h | 260 ++++ .../draco/mesh/triangle_soup_mesh_builder.cc | 89 ++ .../draco/mesh/triangle_soup_mesh_builder.h | 63 + .../mesh/triangle_soup_mesh_builder_test.cc | 197 +++ contrib/draco/src/draco/mesh/valence_cache.h | 142 ++ .../src/draco/metadata/geometry_metadata.cc | 44 + .../src/draco/metadata/geometry_metadata.h | 140 ++ contrib/draco/src/draco/metadata/metadata.cc | 132 ++ contrib/draco/src/draco/metadata/metadata.h | 208 +++ .../src/draco/metadata/metadata_decoder.cc | 148 ++ .../src/draco/metadata/metadata_decoder.h | 42 + .../src/draco/metadata/metadata_encoder.cc | 97 ++ .../src/draco/metadata/metadata_encoder.h | 41 + .../draco/metadata/metadata_encoder_test.cc | 167 +++ .../draco/src/draco/metadata/metadata_test.cc | 157 +++ .../src/draco/point_cloud/point_cloud.cc | 275 ++++ .../draco/src/draco/point_cloud/point_cloud.h | 244 ++++ .../draco/point_cloud/point_cloud_builder.cc | 76 + .../draco/point_cloud/point_cloud_builder.h | 80 ++ .../point_cloud/point_cloud_builder_test.cc | 171 +++ .../src/draco/point_cloud/point_cloud_test.cc | 132 ++ .../draco/src/draco/tools/draco_decoder.cc | 168 +++ .../draco/src/draco/tools/draco_encoder.cc | 369 +++++ contrib/draco/src/draco/tools/fuzz/build.sh | 35 + .../tools/fuzz/draco_mesh_decoder_fuzzer.cc | 29 + ...h_decoder_without_dequantization_fuzzer.cc | 30 + .../tools/fuzz/draco_pc_decoder_fuzzer.cc | 29 + ...c_decoder_without_dequantization_fuzzer.cc | 30 + .../src/draco/unity/draco_unity_plugin.cc | 407 ++++++ .../src/draco/unity/draco_unity_plugin.h | 154 +++ .../draco/unity/draco_unity_plugin_test.cc | 243 ++++ 414 files changed, 51476 insertions(+) create mode 100644 contrib/draco/.clang-format 
create mode 100644 contrib/draco/.cmake-format.py create mode 100644 contrib/draco/.gitignore create mode 100644 contrib/draco/.ruby-version create mode 100644 contrib/draco/.travis.yml create mode 100644 contrib/draco/AUTHORS create mode 100644 contrib/draco/BUILDING.md create mode 100644 contrib/draco/CMAKE.md create mode 100644 contrib/draco/CMakeLists.txt create mode 100644 contrib/draco/CONTRIBUTING.md create mode 100644 contrib/draco/LICENSE create mode 100644 contrib/draco/README.md create mode 100644 contrib/draco/cmake/DracoConfig.cmake create mode 100644 contrib/draco/cmake/FindDraco.cmake create mode 100644 contrib/draco/cmake/compiler_flags.cmake create mode 100644 contrib/draco/cmake/compiler_tests.cmake create mode 100644 contrib/draco/cmake/draco-config.cmake.template create mode 100644 contrib/draco/cmake/draco.pc.template create mode 100644 contrib/draco/cmake/draco_build_definitions.cmake create mode 100644 contrib/draco/cmake/draco_cpu_detection.cmake create mode 100644 contrib/draco/cmake/draco_emscripten.cmake create mode 100644 contrib/draco/cmake/draco_features.cmake create mode 100644 contrib/draco/cmake/draco_flags.cmake create mode 100644 contrib/draco/cmake/draco_helpers.cmake create mode 100644 contrib/draco/cmake/draco_install.cmake create mode 100644 contrib/draco/cmake/draco_intrinsics.cmake create mode 100644 contrib/draco/cmake/draco_options.cmake create mode 100644 contrib/draco/cmake/draco_sanitizer.cmake create mode 100644 contrib/draco/cmake/draco_targets.cmake create mode 100644 contrib/draco/cmake/draco_test_config.h.cmake create mode 100644 contrib/draco/cmake/draco_tests.cmake create mode 100644 contrib/draco/cmake/draco_variables.cmake create mode 100644 contrib/draco/cmake/sanitizers.cmake create mode 100644 contrib/draco/cmake/toolchains/aarch64-linux-gnu.cmake create mode 100644 contrib/draco/cmake/toolchains/android-ndk-common.cmake create mode 100644 contrib/draco/cmake/toolchains/android.cmake create mode 100644 contrib/draco/cmake/toolchains/arm-ios-common.cmake create mode 100644 contrib/draco/cmake/toolchains/arm-linux-gnueabihf.cmake create mode 100644 contrib/draco/cmake/toolchains/arm64-android-ndk-libcpp.cmake create mode 100644 contrib/draco/cmake/toolchains/arm64-ios.cmake create mode 100644 contrib/draco/cmake/toolchains/arm64-linux-gcc.cmake create mode 100644 contrib/draco/cmake/toolchains/armv7-android-ndk-libcpp.cmake create mode 100644 contrib/draco/cmake/toolchains/armv7-ios.cmake create mode 100644 contrib/draco/cmake/toolchains/armv7-linux-gcc.cmake create mode 100644 contrib/draco/cmake/toolchains/armv7s-ios.cmake create mode 100644 contrib/draco/cmake/toolchains/i386-ios.cmake create mode 100644 contrib/draco/cmake/toolchains/x86-android-ndk-libcpp.cmake create mode 100644 contrib/draco/cmake/toolchains/x86_64-android-ndk-libcpp.cmake create mode 100644 contrib/draco/cmake/toolchains/x86_64-ios.cmake create mode 100644 contrib/draco/cmake/util.cmake create mode 100644 contrib/draco/src/draco/animation/keyframe_animation.cc create mode 100644 contrib/draco/src/draco/animation/keyframe_animation.h create mode 100644 contrib/draco/src/draco/animation/keyframe_animation_decoder.cc create mode 100644 contrib/draco/src/draco/animation/keyframe_animation_decoder.h create mode 100644 contrib/draco/src/draco/animation/keyframe_animation_encoder.cc create mode 100644 contrib/draco/src/draco/animation/keyframe_animation_encoder.h create mode 100644 contrib/draco/src/draco/animation/keyframe_animation_encoding_test.cc create mode 
100644 contrib/draco/src/draco/animation/keyframe_animation_test.cc create mode 100644 contrib/draco/src/draco/attributes/attribute_octahedron_transform.cc create mode 100644 contrib/draco/src/draco/attributes/attribute_octahedron_transform.h create mode 100644 contrib/draco/src/draco/attributes/attribute_quantization_transform.cc create mode 100644 contrib/draco/src/draco/attributes/attribute_quantization_transform.h create mode 100644 contrib/draco/src/draco/attributes/attribute_transform.cc create mode 100644 contrib/draco/src/draco/attributes/attribute_transform.h create mode 100644 contrib/draco/src/draco/attributes/attribute_transform_data.h create mode 100644 contrib/draco/src/draco/attributes/attribute_transform_type.h create mode 100644 contrib/draco/src/draco/attributes/geometry_attribute.cc create mode 100644 contrib/draco/src/draco/attributes/geometry_attribute.h create mode 100644 contrib/draco/src/draco/attributes/geometry_indices.h create mode 100644 contrib/draco/src/draco/attributes/point_attribute.cc create mode 100644 contrib/draco/src/draco/attributes/point_attribute.h create mode 100644 contrib/draco/src/draco/attributes/point_attribute_test.cc create mode 100644 contrib/draco/src/draco/compression/attributes/attributes_decoder.cc create mode 100644 contrib/draco/src/draco/compression/attributes/attributes_decoder.h create mode 100644 contrib/draco/src/draco/compression/attributes/attributes_decoder_interface.h create mode 100644 contrib/draco/src/draco/compression/attributes/attributes_encoder.cc create mode 100644 contrib/draco/src/draco/compression/attributes/attributes_encoder.h create mode 100644 contrib/draco/src/draco/compression/attributes/kd_tree_attributes_decoder.cc create mode 100644 contrib/draco/src/draco/compression/attributes/kd_tree_attributes_decoder.h create mode 100644 contrib/draco/src/draco/compression/attributes/kd_tree_attributes_encoder.cc create mode 100644 contrib/draco/src/draco/compression/attributes/kd_tree_attributes_encoder.h create mode 100644 contrib/draco/src/draco/compression/attributes/kd_tree_attributes_shared.h create mode 100644 contrib/draco/src/draco/compression/attributes/linear_sequencer.h create mode 100644 contrib/draco/src/draco/compression/attributes/mesh_attribute_indices_encoding_data.h create mode 100644 contrib/draco/src/draco/compression/attributes/normal_compression_utils.h create mode 100644 contrib/draco/src/draco/compression/attributes/point_d_vector.h create mode 100644 contrib/draco/src/draco/compression/attributes/point_d_vector_test.cc create mode 100644 contrib/draco/src/draco/compression/attributes/points_sequencer.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_decoder.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_encoder.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_shared.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_data.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h create mode 100644 
contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_decoder.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_encoder.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_area.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_base.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_decoder.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_encoder.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_decoder.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_encoder.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_decoder.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_encoder.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_decoder.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_encoder.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_predictor.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoder.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoder_factory.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoder_interface.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoding_transform.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_delta_decoder.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_delta_encoder.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.cc create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_interface.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoding_transform.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_factory.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_interface.h create mode 100644 
contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_decoding_transform.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_encoding_transform.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_base.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_test.cc create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_decoding_transform.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_encoding_transform.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_base.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_test.cc create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_decoding_transform.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_encoding_transform.h create mode 100644 contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_transform_base.h create mode 100644 contrib/draco/src/draco/compression/attributes/sequential_attribute_decoder.cc create mode 100644 contrib/draco/src/draco/compression/attributes/sequential_attribute_decoder.h create mode 100644 contrib/draco/src/draco/compression/attributes/sequential_attribute_decoders_controller.cc create mode 100644 contrib/draco/src/draco/compression/attributes/sequential_attribute_decoders_controller.h create mode 100644 contrib/draco/src/draco/compression/attributes/sequential_attribute_encoder.cc create mode 100644 contrib/draco/src/draco/compression/attributes/sequential_attribute_encoder.h create mode 100644 contrib/draco/src/draco/compression/attributes/sequential_attribute_encoders_controller.cc create mode 100644 contrib/draco/src/draco/compression/attributes/sequential_attribute_encoders_controller.h create mode 100644 contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_decoder.cc create mode 100644 contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_decoder.h create mode 100644 contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoder.cc create mode 100644 contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoder.h create mode 100644 contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoding_test.cc create mode 100644 contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_decoder.cc create mode 100644 contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_decoder.h create mode 100644 contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_encoder.cc create mode 100644 contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_encoder.h create mode 100644 contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_decoder.cc create mode 100644 contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_decoder.h create mode 
100644 contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_encoder.cc create mode 100644 contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_encoder.h create mode 100644 contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_coding_shared.h create mode 100644 contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_decoder.cc create mode 100644 contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_decoder.h create mode 100644 contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_encoder.cc create mode 100644 contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_encoder.h create mode 100644 contrib/draco/src/draco/compression/bit_coders/direct_bit_decoder.cc create mode 100644 contrib/draco/src/draco/compression/bit_coders/direct_bit_decoder.h create mode 100644 contrib/draco/src/draco/compression/bit_coders/direct_bit_encoder.cc create mode 100644 contrib/draco/src/draco/compression/bit_coders/direct_bit_encoder.h create mode 100644 contrib/draco/src/draco/compression/bit_coders/folded_integer_bit_decoder.h create mode 100644 contrib/draco/src/draco/compression/bit_coders/folded_integer_bit_encoder.h create mode 100644 contrib/draco/src/draco/compression/bit_coders/rans_bit_decoder.cc create mode 100644 contrib/draco/src/draco/compression/bit_coders/rans_bit_decoder.h create mode 100644 contrib/draco/src/draco/compression/bit_coders/rans_bit_encoder.cc create mode 100644 contrib/draco/src/draco/compression/bit_coders/rans_bit_encoder.h create mode 100644 contrib/draco/src/draco/compression/bit_coders/rans_coding_test.cc create mode 100644 contrib/draco/src/draco/compression/bit_coders/symbol_bit_decoder.cc create mode 100644 contrib/draco/src/draco/compression/bit_coders/symbol_bit_decoder.h create mode 100644 contrib/draco/src/draco/compression/bit_coders/symbol_bit_encoder.cc create mode 100644 contrib/draco/src/draco/compression/bit_coders/symbol_bit_encoder.h create mode 100644 contrib/draco/src/draco/compression/config/compression_shared.h create mode 100644 contrib/draco/src/draco/compression/config/decoder_options.h create mode 100644 contrib/draco/src/draco/compression/config/decoder_options_test.cc create mode 100644 contrib/draco/src/draco/compression/config/draco_options.h create mode 100644 contrib/draco/src/draco/compression/config/encoder_options.h create mode 100644 contrib/draco/src/draco/compression/config/encoding_features.h create mode 100644 contrib/draco/src/draco/compression/decode.cc create mode 100644 contrib/draco/src/draco/compression/decode.h create mode 100644 contrib/draco/src/draco/compression/decode_test.cc create mode 100644 contrib/draco/src/draco/compression/encode.cc create mode 100644 contrib/draco/src/draco/compression/encode.h create mode 100644 contrib/draco/src/draco/compression/encode_base.h create mode 100644 contrib/draco/src/draco/compression/encode_test.cc create mode 100644 contrib/draco/src/draco/compression/entropy/ans.h create mode 100644 contrib/draco/src/draco/compression/entropy/rans_symbol_coding.h create mode 100644 contrib/draco/src/draco/compression/entropy/rans_symbol_decoder.h create mode 100644 contrib/draco/src/draco/compression/entropy/rans_symbol_encoder.h create mode 100644 contrib/draco/src/draco/compression/entropy/shannon_entropy.cc create mode 100644 contrib/draco/src/draco/compression/entropy/shannon_entropy.h create mode 100644 contrib/draco/src/draco/compression/entropy/shannon_entropy_test.cc create mode 100644 
contrib/draco/src/draco/compression/entropy/symbol_coding_test.cc create mode 100644 contrib/draco/src/draco/compression/entropy/symbol_decoding.cc create mode 100644 contrib/draco/src/draco/compression/entropy/symbol_decoding.h create mode 100644 contrib/draco/src/draco/compression/entropy/symbol_encoding.cc create mode 100644 contrib/draco/src/draco/compression/entropy/symbol_encoding.h create mode 100644 contrib/draco/src/draco/compression/expert_encode.cc create mode 100644 contrib/draco/src/draco/compression/expert_encode.h create mode 100644 contrib/draco/src/draco/compression/mesh/mesh_decoder.cc create mode 100644 contrib/draco/src/draco/compression/mesh/mesh_decoder.h create mode 100644 contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_decoder.cc create mode 100644 contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_decoder.h create mode 100644 contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_decoder_impl.cc create mode 100644 contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_decoder_impl.h create mode 100644 contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_decoder_impl_interface.h create mode 100644 contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder.cc create mode 100644 contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder.h create mode 100644 contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder_impl.cc create mode 100644 contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder_impl.h create mode 100644 contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder_impl_interface.h create mode 100644 contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoding_test.cc create mode 100644 contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_shared.h create mode 100644 contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_traversal_decoder.h create mode 100644 contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_traversal_encoder.h create mode 100644 contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_traversal_predictive_decoder.h create mode 100644 contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_traversal_predictive_encoder.h create mode 100644 contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_traversal_valence_decoder.h create mode 100644 contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_traversal_valence_encoder.h create mode 100644 contrib/draco/src/draco/compression/mesh/mesh_encoder.cc create mode 100644 contrib/draco/src/draco/compression/mesh/mesh_encoder.h create mode 100644 contrib/draco/src/draco/compression/mesh/mesh_encoder_test.cc create mode 100644 contrib/draco/src/draco/compression/mesh/mesh_sequential_decoder.cc create mode 100644 contrib/draco/src/draco/compression/mesh/mesh_sequential_decoder.h create mode 100644 contrib/draco/src/draco/compression/mesh/mesh_sequential_encoder.cc create mode 100644 contrib/draco/src/draco/compression/mesh/mesh_sequential_encoder.h create mode 100644 contrib/draco/src/draco/compression/mesh/traverser/depth_first_traverser.h create mode 100644 contrib/draco/src/draco/compression/mesh/traverser/max_prediction_degree_traverser.h create mode 100644 contrib/draco/src/draco/compression/mesh/traverser/mesh_attribute_indices_encoding_observer.h create mode 100644 contrib/draco/src/draco/compression/mesh/traverser/mesh_traversal_sequencer.h create mode 100644 contrib/draco/src/draco/compression/mesh/traverser/traverser_base.h create mode 100644 
contrib/draco/src/draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_decoder.cc create mode 100644 contrib/draco/src/draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_decoder.h create mode 100644 contrib/draco/src/draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_encoder.cc create mode 100644 contrib/draco/src/draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_encoder.h create mode 100644 contrib/draco/src/draco/compression/point_cloud/algorithms/float_points_tree_decoder.cc create mode 100644 contrib/draco/src/draco/compression/point_cloud/algorithms/float_points_tree_decoder.h create mode 100644 contrib/draco/src/draco/compression/point_cloud/algorithms/float_points_tree_encoder.cc create mode 100644 contrib/draco/src/draco/compression/point_cloud/algorithms/float_points_tree_encoder.h create mode 100644 contrib/draco/src/draco/compression/point_cloud/algorithms/integer_points_kd_tree_decoder.cc create mode 100644 contrib/draco/src/draco/compression/point_cloud/algorithms/integer_points_kd_tree_decoder.h create mode 100644 contrib/draco/src/draco/compression/point_cloud/algorithms/integer_points_kd_tree_encoder.cc create mode 100644 contrib/draco/src/draco/compression/point_cloud/algorithms/integer_points_kd_tree_encoder.h create mode 100644 contrib/draco/src/draco/compression/point_cloud/algorithms/point_cloud_compression_method.h create mode 100644 contrib/draco/src/draco/compression/point_cloud/algorithms/point_cloud_types.h create mode 100644 contrib/draco/src/draco/compression/point_cloud/algorithms/quantize_points_3.h create mode 100644 contrib/draco/src/draco/compression/point_cloud/algorithms/queuing_policy.h create mode 100644 contrib/draco/src/draco/compression/point_cloud/point_cloud_decoder.cc create mode 100644 contrib/draco/src/draco/compression/point_cloud/point_cloud_decoder.h create mode 100644 contrib/draco/src/draco/compression/point_cloud/point_cloud_encoder.cc create mode 100644 contrib/draco/src/draco/compression/point_cloud/point_cloud_encoder.h create mode 100644 contrib/draco/src/draco/compression/point_cloud/point_cloud_kd_tree_decoder.cc create mode 100644 contrib/draco/src/draco/compression/point_cloud/point_cloud_kd_tree_decoder.h create mode 100644 contrib/draco/src/draco/compression/point_cloud/point_cloud_kd_tree_encoder.cc create mode 100644 contrib/draco/src/draco/compression/point_cloud/point_cloud_kd_tree_encoder.h create mode 100644 contrib/draco/src/draco/compression/point_cloud/point_cloud_kd_tree_encoding_test.cc create mode 100644 contrib/draco/src/draco/compression/point_cloud/point_cloud_sequential_decoder.cc create mode 100644 contrib/draco/src/draco/compression/point_cloud/point_cloud_sequential_decoder.h create mode 100644 contrib/draco/src/draco/compression/point_cloud/point_cloud_sequential_encoder.cc create mode 100644 contrib/draco/src/draco/compression/point_cloud/point_cloud_sequential_encoder.h create mode 100644 contrib/draco/src/draco/compression/point_cloud/point_cloud_sequential_encoding_test.cc create mode 100644 contrib/draco/src/draco/core/bit_utils.cc create mode 100644 contrib/draco/src/draco/core/bit_utils.h create mode 100644 contrib/draco/src/draco/core/bounding_box.cc create mode 100644 contrib/draco/src/draco/core/bounding_box.h create mode 100644 contrib/draco/src/draco/core/buffer_bit_coding_test.cc create mode 100644 contrib/draco/src/draco/core/cycle_timer.cc create mode 100644 contrib/draco/src/draco/core/cycle_timer.h create mode 
100644 contrib/draco/src/draco/core/data_buffer.cc create mode 100644 contrib/draco/src/draco/core/data_buffer.h create mode 100644 contrib/draco/src/draco/core/decoder_buffer.cc create mode 100644 contrib/draco/src/draco/core/decoder_buffer.h create mode 100644 contrib/draco/src/draco/core/divide.cc create mode 100644 contrib/draco/src/draco/core/divide.h create mode 100644 contrib/draco/src/draco/core/draco_index_type.h create mode 100644 contrib/draco/src/draco/core/draco_index_type_vector.h create mode 100644 contrib/draco/src/draco/core/draco_test_base.h create mode 100644 contrib/draco/src/draco/core/draco_test_utils.cc create mode 100644 contrib/draco/src/draco/core/draco_test_utils.h create mode 100644 contrib/draco/src/draco/core/draco_types.cc create mode 100644 contrib/draco/src/draco/core/draco_types.h create mode 100644 contrib/draco/src/draco/core/draco_version.h create mode 100644 contrib/draco/src/draco/core/encoder_buffer.cc create mode 100644 contrib/draco/src/draco/core/encoder_buffer.h create mode 100644 contrib/draco/src/draco/core/hash_utils.cc create mode 100644 contrib/draco/src/draco/core/hash_utils.h create mode 100644 contrib/draco/src/draco/core/macros.h create mode 100644 contrib/draco/src/draco/core/math_utils.h create mode 100644 contrib/draco/src/draco/core/math_utils_test.cc create mode 100644 contrib/draco/src/draco/core/options.cc create mode 100644 contrib/draco/src/draco/core/options.h create mode 100644 contrib/draco/src/draco/core/quantization_utils.cc create mode 100644 contrib/draco/src/draco/core/quantization_utils.h create mode 100644 contrib/draco/src/draco/core/quantization_utils_test.cc create mode 100644 contrib/draco/src/draco/core/status.h create mode 100644 contrib/draco/src/draco/core/status_or.h create mode 100644 contrib/draco/src/draco/core/status_test.cc create mode 100644 contrib/draco/src/draco/core/varint_decoding.h create mode 100644 contrib/draco/src/draco/core/varint_encoding.h create mode 100644 contrib/draco/src/draco/core/vector_d.h create mode 100644 contrib/draco/src/draco/core/vector_d_test.cc create mode 100644 contrib/draco/src/draco/io/file_reader_factory.cc create mode 100644 contrib/draco/src/draco/io/file_reader_factory.h create mode 100644 contrib/draco/src/draco/io/file_reader_factory_test.cc create mode 100644 contrib/draco/src/draco/io/file_reader_interface.h create mode 100644 contrib/draco/src/draco/io/file_reader_test_common.h create mode 100644 contrib/draco/src/draco/io/file_utils.cc create mode 100644 contrib/draco/src/draco/io/file_utils.h create mode 100644 contrib/draco/src/draco/io/file_utils_test.cc create mode 100644 contrib/draco/src/draco/io/file_writer_factory.cc create mode 100644 contrib/draco/src/draco/io/file_writer_factory.h create mode 100644 contrib/draco/src/draco/io/file_writer_factory_test.cc create mode 100644 contrib/draco/src/draco/io/file_writer_interface.h create mode 100644 contrib/draco/src/draco/io/file_writer_utils.cc create mode 100644 contrib/draco/src/draco/io/file_writer_utils.h create mode 100644 contrib/draco/src/draco/io/mesh_io.cc create mode 100644 contrib/draco/src/draco/io/mesh_io.h create mode 100644 contrib/draco/src/draco/io/obj_decoder.cc create mode 100644 contrib/draco/src/draco/io/obj_decoder.h create mode 100644 contrib/draco/src/draco/io/obj_decoder_test.cc create mode 100644 contrib/draco/src/draco/io/obj_encoder.cc create mode 100644 contrib/draco/src/draco/io/obj_encoder.h create mode 100644 contrib/draco/src/draco/io/obj_encoder_test.cc create mode 100644 
contrib/draco/src/draco/io/parser_utils.cc create mode 100644 contrib/draco/src/draco/io/parser_utils.h create mode 100644 contrib/draco/src/draco/io/ply_decoder.cc create mode 100644 contrib/draco/src/draco/io/ply_decoder.h create mode 100644 contrib/draco/src/draco/io/ply_decoder_test.cc create mode 100644 contrib/draco/src/draco/io/ply_encoder.cc create mode 100644 contrib/draco/src/draco/io/ply_encoder.h create mode 100644 contrib/draco/src/draco/io/ply_property_reader.h create mode 100644 contrib/draco/src/draco/io/ply_property_writer.h create mode 100644 contrib/draco/src/draco/io/ply_reader.cc create mode 100644 contrib/draco/src/draco/io/ply_reader.h create mode 100644 contrib/draco/src/draco/io/ply_reader_test.cc create mode 100644 contrib/draco/src/draco/io/point_cloud_io.cc create mode 100644 contrib/draco/src/draco/io/point_cloud_io.h create mode 100644 contrib/draco/src/draco/io/point_cloud_io_test.cc create mode 100644 contrib/draco/src/draco/io/stdio_file_reader.cc create mode 100644 contrib/draco/src/draco/io/stdio_file_reader.h create mode 100644 contrib/draco/src/draco/io/stdio_file_reader_test.cc create mode 100644 contrib/draco/src/draco/io/stdio_file_writer.cc create mode 100644 contrib/draco/src/draco/io/stdio_file_writer.h create mode 100644 contrib/draco/src/draco/io/stdio_file_writer_test.cc create mode 100644 contrib/draco/src/draco/javascript/emscripten/animation_decoder_webidl_wrapper.cc create mode 100644 contrib/draco/src/draco/javascript/emscripten/animation_decoder_webidl_wrapper.h create mode 100644 contrib/draco/src/draco/javascript/emscripten/animation_encoder_webidl_wrapper.cc create mode 100644 contrib/draco/src/draco/javascript/emscripten/animation_encoder_webidl_wrapper.h create mode 100644 contrib/draco/src/draco/javascript/emscripten/decoder_functions.js create mode 100644 contrib/draco/src/draco/javascript/emscripten/decoder_webidl_wrapper.cc create mode 100644 contrib/draco/src/draco/javascript/emscripten/decoder_webidl_wrapper.h create mode 100644 contrib/draco/src/draco/javascript/emscripten/draco_animation_decoder_glue_wrapper.cc create mode 100644 contrib/draco/src/draco/javascript/emscripten/draco_animation_encoder_glue_wrapper.cc create mode 100644 contrib/draco/src/draco/javascript/emscripten/draco_animation_web_decoder.idl create mode 100644 contrib/draco/src/draco/javascript/emscripten/draco_animation_web_encoder.idl create mode 100644 contrib/draco/src/draco/javascript/emscripten/draco_decoder_glue_wrapper.cc create mode 100644 contrib/draco/src/draco/javascript/emscripten/draco_encoder_glue_wrapper.cc create mode 100644 contrib/draco/src/draco/javascript/emscripten/draco_web_decoder.idl create mode 100644 contrib/draco/src/draco/javascript/emscripten/draco_web_encoder.idl create mode 100644 contrib/draco/src/draco/javascript/emscripten/encoder_webidl_wrapper.cc create mode 100644 contrib/draco/src/draco/javascript/emscripten/encoder_webidl_wrapper.h create mode 100644 contrib/draco/src/draco/javascript/emscripten/finalize.js create mode 100644 contrib/draco/src/draco/javascript/emscripten/prepareCallbacks.js create mode 100644 contrib/draco/src/draco/javascript/emscripten/version.js create mode 100644 contrib/draco/src/draco/maya/draco_maya_plugin.cc create mode 100644 contrib/draco/src/draco/maya/draco_maya_plugin.h create mode 100644 contrib/draco/src/draco/mesh/corner_table.cc create mode 100644 contrib/draco/src/draco/mesh/corner_table.h create mode 100644 contrib/draco/src/draco/mesh/corner_table_iterators.h create mode 100644 
contrib/draco/src/draco/mesh/mesh.cc create mode 100644 contrib/draco/src/draco/mesh/mesh.h create mode 100644 contrib/draco/src/draco/mesh/mesh_are_equivalent.cc create mode 100644 contrib/draco/src/draco/mesh/mesh_are_equivalent.h create mode 100644 contrib/draco/src/draco/mesh/mesh_are_equivalent_test.cc create mode 100644 contrib/draco/src/draco/mesh/mesh_attribute_corner_table.cc create mode 100644 contrib/draco/src/draco/mesh/mesh_attribute_corner_table.h create mode 100644 contrib/draco/src/draco/mesh/mesh_cleanup.cc create mode 100644 contrib/draco/src/draco/mesh/mesh_cleanup.h create mode 100644 contrib/draco/src/draco/mesh/mesh_cleanup_test.cc create mode 100644 contrib/draco/src/draco/mesh/mesh_misc_functions.cc create mode 100644 contrib/draco/src/draco/mesh/mesh_misc_functions.h create mode 100644 contrib/draco/src/draco/mesh/mesh_stripifier.cc create mode 100644 contrib/draco/src/draco/mesh/mesh_stripifier.h create mode 100644 contrib/draco/src/draco/mesh/triangle_soup_mesh_builder.cc create mode 100644 contrib/draco/src/draco/mesh/triangle_soup_mesh_builder.h create mode 100644 contrib/draco/src/draco/mesh/triangle_soup_mesh_builder_test.cc create mode 100644 contrib/draco/src/draco/mesh/valence_cache.h create mode 100644 contrib/draco/src/draco/metadata/geometry_metadata.cc create mode 100644 contrib/draco/src/draco/metadata/geometry_metadata.h create mode 100644 contrib/draco/src/draco/metadata/metadata.cc create mode 100644 contrib/draco/src/draco/metadata/metadata.h create mode 100644 contrib/draco/src/draco/metadata/metadata_decoder.cc create mode 100644 contrib/draco/src/draco/metadata/metadata_decoder.h create mode 100644 contrib/draco/src/draco/metadata/metadata_encoder.cc create mode 100644 contrib/draco/src/draco/metadata/metadata_encoder.h create mode 100644 contrib/draco/src/draco/metadata/metadata_encoder_test.cc create mode 100644 contrib/draco/src/draco/metadata/metadata_test.cc create mode 100644 contrib/draco/src/draco/point_cloud/point_cloud.cc create mode 100644 contrib/draco/src/draco/point_cloud/point_cloud.h create mode 100644 contrib/draco/src/draco/point_cloud/point_cloud_builder.cc create mode 100644 contrib/draco/src/draco/point_cloud/point_cloud_builder.h create mode 100644 contrib/draco/src/draco/point_cloud/point_cloud_builder_test.cc create mode 100644 contrib/draco/src/draco/point_cloud/point_cloud_test.cc create mode 100644 contrib/draco/src/draco/tools/draco_decoder.cc create mode 100644 contrib/draco/src/draco/tools/draco_encoder.cc create mode 100644 contrib/draco/src/draco/tools/fuzz/build.sh create mode 100644 contrib/draco/src/draco/tools/fuzz/draco_mesh_decoder_fuzzer.cc create mode 100644 contrib/draco/src/draco/tools/fuzz/draco_mesh_decoder_without_dequantization_fuzzer.cc create mode 100644 contrib/draco/src/draco/tools/fuzz/draco_pc_decoder_fuzzer.cc create mode 100644 contrib/draco/src/draco/tools/fuzz/draco_pc_decoder_without_dequantization_fuzzer.cc create mode 100644 contrib/draco/src/draco/unity/draco_unity_plugin.cc create mode 100644 contrib/draco/src/draco/unity/draco_unity_plugin.h create mode 100644 contrib/draco/src/draco/unity/draco_unity_plugin_test.cc diff --git a/contrib/draco/.clang-format b/contrib/draco/.clang-format new file mode 100644 index 000000000..533d35e6d --- /dev/null +++ b/contrib/draco/.clang-format @@ -0,0 +1,5 @@ +--- +Language: Cpp +BasedOnStyle: Google +PointerAlignment: Right +... 
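As a minimal illustration of how the `.clang-format` configuration above is consumed (it bases the style on Google's C++ style with right-aligned pointers), assuming clang-format is installed and run from within `contrib/draco` so the file is found by its parent-directory search, a Draco source added by this patch can be formatted in place:

~~~~~ bash
# Format a single Draco source file using the .clang-format above.
# -style=file makes clang-format pick up the nearest .clang-format.
$ cd contrib/draco
$ clang-format -style=file -i src/draco/core/decoder_buffer.cc

# Check formatting without rewriting the file by diffing against the tool's output.
$ clang-format -style=file src/draco/core/decoder_buffer.cc | diff -u src/draco/core/decoder_buffer.cc -
~~~~~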
diff --git a/contrib/draco/.cmake-format.py b/contrib/draco/.cmake-format.py new file mode 100644 index 000000000..64f2495b4 --- /dev/null +++ b/contrib/draco/.cmake-format.py @@ -0,0 +1,102 @@ +# Generated with cmake-format 0.5.1 +# How wide to allow formatted cmake files +line_width = 80 + +# How many spaces to tab for indent +tab_size = 2 + +# If arglists are longer than this, break them always +max_subargs_per_line = 10 + +# If true, separate flow control names from their parentheses with a space +separate_ctrl_name_with_space = False + +# If true, separate function names from parentheses with a space +separate_fn_name_with_space = False + +# If a statement is wrapped to more than one line, than dangle the closing +# parenthesis on its own line +dangle_parens = False + +# What character to use for bulleted lists +bullet_char = '*' + +# What character to use as punctuation after numerals in an enumerated list +enum_char = '.' + +# What style line endings to use in the output. +line_ending = u'unix' + +# Format command names consistently as 'lower' or 'upper' case +command_case = u'lower' + +# Format keywords consistently as 'lower' or 'upper' case +keyword_case = u'unchanged' + +# Specify structure for custom cmake functions +additional_commands = { + "foo": { + "flags": [ + "BAR", + "BAZ" + ], + "kwargs": { + "HEADERS": "*", + "DEPENDS": "*", + "SOURCES": "*" + } + } +} + +# A list of command names which should always be wrapped +always_wrap = [] + +# Specify the order of wrapping algorithms during successive reflow attempts +algorithm_order = [0, 1, 2, 3, 4] + +# If true, the argument lists which are known to be sortable will be sorted +# lexicographicall +autosort = False + +# enable comment markup parsing and reflow +enable_markup = True + +# If comment markup is enabled, don't reflow the first comment block in +# eachlistfile. Use this to preserve formatting of your +# copyright/licensestatements. +first_comment_is_literal = False + +# If comment markup is enabled, don't reflow any comment block which matchesthis +# (regex) pattern. Default is `None` (disabled). +literal_comment_pattern = None + +# Regular expression to match preformat fences in comments +# default=r'^\s*([`~]{3}[`~]*)(.*)$' +fence_pattern = u'^\\s*([`~]{3}[`~]*)(.*)$' + +# Regular expression to match rulers in comments +# default=r'^\s*[^\w\s]{3}.*[^\w\s]{3}$' +ruler_pattern = u'^\\s*[^\\w\\s]{3}.*[^\\w\\s]{3}$' + +# If true, emit the unicode byte-order mark (BOM) at the start of the file +emit_byteorder_mark = False + +# If a comment line starts with at least this many consecutive hash characters, +# then don't lstrip() them off. This allows for lazy hash rulers where the first +# hash char is not separated by space +hashruler_min_length = 10 + +# If true, then insert a space between the first hash char and remaining hash +# chars in a hash ruler, and normalize its length to fill the column +canonicalize_hashrulers = True + +# Specify the encoding of the input file. Defaults to utf-8. +input_encoding = u'utf-8' + +# Specify the encoding of the output file. Defaults to utf-8. Note that cmake +# only claims to support utf-8 so be careful when using anything else +output_encoding = u'utf-8' + +# A dictionary containing any per-command configuration overrides. Currently +# only `command_case` is supported. 
+per_command = {} diff --git a/contrib/draco/.gitignore b/contrib/draco/.gitignore new file mode 100644 index 000000000..522866ee2 --- /dev/null +++ b/contrib/draco/.gitignore @@ -0,0 +1 @@ +docs/_site diff --git a/contrib/draco/.ruby-version b/contrib/draco/.ruby-version new file mode 100644 index 000000000..276cbf9e2 --- /dev/null +++ b/contrib/draco/.ruby-version @@ -0,0 +1 @@ +2.3.0 diff --git a/contrib/draco/.travis.yml b/contrib/draco/.travis.yml new file mode 100644 index 000000000..e9ef7123f --- /dev/null +++ b/contrib/draco/.travis.yml @@ -0,0 +1,31 @@ +cache: ccache +language: cpp +matrix: + include: + - os: linux + dist: xenial + compiler: clang + - os: linux + dist: xenial + compiler: gcc + - os: osx + compiler: clang + +addons: + apt: + packages: + - cmake + +script: + # Output version info for compilers, cmake, and make + - ${CC} -v + - ${CXX} -v + - cmake --version + - make --version + # Clone googletest + - pushd .. && git clone https://github.com/google/googletest.git && popd + # Configure and build + - mkdir _travis_build && cd _travis_build + - cmake -G "Unix Makefiles" -DENABLE_TESTS=ON .. + - make -j10 + - ./draco_tests diff --git a/contrib/draco/AUTHORS b/contrib/draco/AUTHORS new file mode 100644 index 000000000..67f63a671 --- /dev/null +++ b/contrib/draco/AUTHORS @@ -0,0 +1,7 @@ +# This is the list of Draco authors for copyright purposes. +# +# This does not necessarily list everyone who has contributed code, since in +# some cases, their employer may be the copyright holder. To see the full list +# of contributors, see the revision history in source control. +Google Inc. +and other contributors diff --git a/contrib/draco/BUILDING.md b/contrib/draco/BUILDING.md new file mode 100644 index 000000000..d33917b88 --- /dev/null +++ b/contrib/draco/BUILDING.md @@ -0,0 +1,301 @@ +_**Contents**_ + + * [CMake Basics](#cmake-basics) + * [Mac OS X](#mac-os-x) + * [Windows](#windows) + * [CMake Build Configuration](#cmake-build-configuration) + * [Debugging and Optimization](#debugging-and-optimization) + * [Googletest Integration](#googletest-integration) + * [Javascript Encoder/Decoder](#javascript-encoderdecoder) + * [WebAssembly Decoder](#webassembly-decoder) + * [WebAssembly Mesh Only Decoder](#webassembly-mesh-only-decoder) + * [WebAssembly Point Cloud Only Decoder](#webassembly-point-cloud-only-decoder) + * [iOS Builds](#ios-builds) + * [Android Studio Project Integration](#android-studio-project-integration) + * [Native Android Builds](#native-android-builds) + * [vcpkg](#vcpkg) + +Building +======== +For all platforms, you must first generate the project/make files and then +compile the examples. + +CMake Basics +------------ + +To generate project/make files for the default toolchain on your system, run +`cmake` from a directory where you would like to generate build files, and pass +it the path to your Draco repository. + +E.g. Starting from Draco root. + +~~~~~ bash +$ mkdir build_dir && cd build_dir +$ cmake ../ +~~~~~ + +On Windows, the above command will produce Visual Studio project files for the +newest Visual Studio detected on the system. On Mac OS X and Linux systems, +the above command will produce a `makefile`. + +To control what types of projects are generated, add the `-G` parameter to the +`cmake` command. This argument must be followed by the name of a generator. +Running `cmake` with the `--help` argument will list the available +generators for your system. 
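As a concrete sketch of the `-G` parameter described above — assuming the Ninja generator happens to be installed on the system; `cmake --help` prints the generators actually available — project files can be produced with an explicit generator and then built through CMake's generator-agnostic driver:

~~~~~ bash
# List the generators available on this system (printed at the end of the help output).
$ cmake --help

# Generate Ninja build files instead of the platform default (assumes Ninja is installed).
$ mkdir build_ninja && cd build_ninja
$ cmake ../ -G Ninja

# Build without invoking the generator's tool (make, ninja, MSBuild, ...) directly.
$ cmake --build .
~~~~~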
+ +Mac OS X +--------- + +On Mac OS X, run the following command to generate Xcode projects: + +~~~~~ bash +$ cmake ../ -G Xcode +~~~~~ + +Windows +------- + +On a Windows box you would run the following command to generate Visual Studio +2019 projects: + +~~~~~ bash +C:\Users\nobody> cmake ../ -G "Visual Studio 16 2019" -A Win32 +~~~~~ + +To generate 64-bit Windows Visual Studio 2019 projects: + +~~~~~ bash +C:\Users\nobody> cmake ../ -G "Visual Studio 16 2019" -A x64 +~~~~~ + + +CMake Build Configuration +------------------------- + +Debugging and Optimization +-------------------------- + +Unlike Visual Studio and Xcode projects, the build configuration for make +builds is controlled when you run `cmake`. The following examples demonstrate +various build configurations. + +Omitting the build type produces makefiles that use release build flags +by default: + +~~~~~ bash +$ cmake ../ +~~~~~ + +A makefile using release (optimized) flags is produced like this: + +~~~~~ bash +$ cmake ../ -DCMAKE_BUILD_TYPE=Release +~~~~~ + +A release build with debug info can be produced as well: + +~~~~~ bash +$ cmake ../ -DCMAKE_BUILD_TYPE=RelWithDebInfo +~~~~~ + +And your standard debug build will be produced using: + +~~~~~ bash +$ cmake ../ -DCMAKE_BUILD_TYPE=Debug +~~~~~ + +To enable the use of sanitizers when the compiler in use supports them, set the +sanitizer type when running CMake: + +~~~~~ bash +$ cmake ../ -DDRACO_SANITIZE=address +~~~~~ + +Googletest Integration +---------------------- + +Draco includes testing support built using Googletest. To enable Googletest unit +test support the DRACO_TESTS cmake variable must be turned on at cmake +generation time: + +~~~~~ bash +$ cmake ../ -DDRACO_TESTS=ON +~~~~~ + +When cmake is used as shown in the above example the googletest directory must +be a sibling of the Draco repository root directory. To run the tests execute +`draco_tests` from your build output directory. + +WebAssembly Decoder +------------------- + +The WebAssembly decoder can be built using the existing cmake build file by +passing the path the Emscripten's cmake toolchain file at cmake generation time +in the CMAKE_TOOLCHAIN_FILE variable and enabling the WASM build option. +In addition, the EMSCRIPTEN environment variable must be set to the local path +of the parent directory of the Emscripten tools directory. + +~~~~~ bash +# Make the path to emscripten available to cmake. +$ export EMSCRIPTEN=/path/to/emscripten/tools/parent + +# Emscripten.cmake can be found within your Emscripten installation directory, +# it should be the subdir: cmake/Modules/Platform/Emscripten.cmake +$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=/path/to/Emscripten.cmake -DDRACO_WASM=ON + +# Build the WebAssembly decoder. +$ make + +# Run the Javascript wrapper through Closure. +$ java -jar closure.jar --compilation_level SIMPLE --js draco_decoder.js --js_output_file draco_wasm_wrapper.js + +~~~~~ + +WebAssembly Mesh Only Decoder +----------------------------- + +~~~~~ bash + +# cmake command line for mesh only WebAssembly decoder. +$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=/path/to/Emscripten.cmake -DDRACO_WASM=ON -DDRACO_POINT_CLOUD_COMPRESSION=OFF + +~~~~~ + +WebAssembly Point Cloud Only Decoder +----------------------------- + +~~~~~ bash + +# cmake command line for point cloud only WebAssembly decoder. 
+$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=/path/to/Emscripten.cmake -DDRACO_WASM=ON -DDRACO_MESH_COMPRESSION=OFF + +~~~~~ + +Javascript Encoder/Decoder +------------------ + +The javascript encoder and decoder can be built using the existing cmake build +file by passing the path the Emscripten's cmake toolchain file at cmake +generation time in the CMAKE_TOOLCHAIN_FILE variable. +In addition, the EMSCRIPTEN environment variable must be set to the local path +of the parent directory of the Emscripten tools directory. + +*Note* The WebAssembly decoder should be favored over the JavaScript decoder. + +~~~~~ bash +# Make the path to emscripten available to cmake. +$ export EMSCRIPTEN=/path/to/emscripten/tools/parent + +# Emscripten.cmake can be found within your Emscripten installation directory, +# it should be the subdir: cmake/Modules/Platform/Emscripten.cmake +$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=/path/to/Emscripten.cmake + +# Build the Javascript encoder and decoder. +$ make +~~~~~ + +iOS Builds +--------------------- +These are the basic commands needed to build Draco for iOS targets. +~~~~~ bash + +#arm64 +$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=../cmake/toolchains/arm64-ios.cmake +$ make + +#x86_64 +$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=../cmake/toolchains/x86_64-ios.cmake +$ make + +#armv7 +$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=../cmake/toolchains/armv7-ios.cmake +$ make + +#i386 +$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=../cmake/toolchains/i386-ios.cmake +$ make +~~~~~~ + +After building for each target the libraries can be merged into a single +universal/fat library using lipo, and then used in iOS applications. + + +Native Android Builds +--------------------- + +It's sometimes useful to build Draco command line tools and run them directly on +Android devices via adb. + +~~~~~ bash +# This example is for armeabi-v7a. +$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=../cmake/toolchains/android.cmake \ + -DDRACO_ANDROID_NDK_PATH=path/to/ndk -DANDROID_ABI=armeabi-v7a +$ make + +# See the android.cmake toolchain file for additional ANDROID_ABI options and +# other configurable Android variables. +~~~~~ + +After building the tools they can be moved to an android device via the use of +`adb push`, and then run within an `adb shell` instance. + + +Android Studio Project Integration +---------------------------------- + +Tested on Android Studio 3.5.3. + + +Draco - Static Library +---------------------- + +To include Draco in an existing or new Android Studio project, reference it +from the `cmake` file of an existing native project that has a minimum SDK +version of 18 or higher. The project must support C++11. +To add Draco to your project: + + 1. Create a new "Native C++" project. + + 2. Add the following somewhere within the `CMakeLists.txt` for your project + before the `add_library()` for your project's native-lib: + + ~~~~~ cmake + # Note "/path/to/draco" must be changed to the path where you have cloned + # the Draco sources. + + add_subdirectory(/path/to/draco + ${CMAKE_BINARY_DIR}/draco_build) + include_directories("${CMAKE_BINARY_DIR}" /path/to/draco) + ~~~~~ + + 3. Add the library target "draco" to the `target_link_libraries()` call for + your project's native-lib. The `target_link_libraries()` call for an + empty activity native project looks like this after the addition of + Draco: + + ~~~~~ cmake + target_link_libraries( # Specifies the target library. + native-lib + + # Tells cmake this build depends on libdraco. + draco + + # Links the target library to the log library + # included in the NDK. 
+ ${log-lib} ) + +vcpkg +--------------------- +You can download and install Draco using the +[vcpkg](https://github.com/Microsoft/vcpkg/) dependency manager: + + git clone https://github.com/Microsoft/vcpkg.git + cd vcpkg + ./bootstrap-vcpkg.sh + ./vcpkg integrate install + vcpkg install draco + +The Draco port in vcpkg is kept up to date by Microsoft team members and +community contributors. If the version is out of date, please +[create an issue or pull request](https://github.com/Microsoft/vcpkg) on the +vcpkg repository. diff --git a/contrib/draco/CMAKE.md b/contrib/draco/CMAKE.md new file mode 100644 index 000000000..392c6ce40 --- /dev/null +++ b/contrib/draco/CMAKE.md @@ -0,0 +1,106 @@ +# CMake Build System Overview + +[TOC] + +This document provides a general layout of the Draco CMake build system. + +## Core Build System Files + +These files are listed in order of interest to maintainers of the build system. + +- `CMakeLists.txt` is the main driver of the build system. It's responsible + for defining targets and source lists, surfacing build system options, and + tying the components of the build system together. + +- `cmake/draco_build_definitions.cmake` defines the macro + `draco_set_build_definitions()`, which is called from `CMakeLists.txt` to + configure include paths, compiler and linker flags, library settings, + platform speficic configuration, and other build system settings that + depend on optional build configurations. + +- `cmake/draco_targets.cmake` defines the macros `draco_add_library()` and + `draco_add_executable()` which are used to create all targets in the CMake + build. These macros attempt to behave in a manner that loosely mirrors the + blaze `cc_library()` and `cc_binary()` commands. Note that + `draco_add_executable()` is also used for tests. + +- `cmake/draco_emscripten.cmake` handles Emscripten SDK integration. It + defines several Emscripten specific macros that are required to build the + Emscripten specific targets defined in `CMakeLists.txt`. + +- `cmake/draco_flags.cmake` defines macros related to compiler and linker + flags. Testing macros, macros for isolating flags to specific source files, + and the main flag configuration function for the library are defined here. + +- `cmake/draco_options.cmake` defines macros that control optional features + of draco, and help track draco library and build system options. + +- `cmake/draco_install.cmake` defines the draco install target. + +- `cmake/draco_cpu_detection.cmake` determines the optimization types to + enable based on target system processor as reported by CMake. + +- `cmake/draco_intrinsics.cmake` manages flags for source files that use + intrinsics. It handles detection of whether flags are necessary, and the + application of the flags to the sources that need them when they are + required. + +## Helper and Utility Files + +- `.cmake-format.py` Defines coding style for cmake-format. + +- `cmake/draco_helpers.cmake` defines utility macros. + +- `cmake/draco_sanitizer.cmake` defines the `draco_configure_sanitizer()` + macro, which implements support for `DRACO_SANITIZE`. It handles the + compiler and linker flags necessary for using sanitizers like asan and msan. + +- `cmake/draco_variables.cmake` defines macros for tracking and control of + draco build system variables. + +## Toolchain Files + +These files help facilitate cross compiling of draco for various targets. + +- `cmake/toolchains/aarch64-linux-gnu.cmake` provides cross compilation + support for arm64 targets. 
+ +- `cmake/toolchains/android.cmake` provides cross compilation support for + Android targets. + +- `cmake/toolchains/arm-linux-gnueabihf.cmake` provides cross compilation + support for armv7 targets. + +- `cmake/toolchains/arm64-ios.cmake`, `cmake/toolchains/armv7-ios.cmake`, + and `cmake/toolchains/armv7s-ios.cmake` provide support for iOS. + +- `cmake/toolchains/arm64-linux-gcc.cmake` and + `cmake/toolchains/armv7-linux-gcc.cmake` are deprecated, but remain for + compatibility. `cmake/toolchains/android.cmake` should be used instead. + +- `cmake/toolchains/arm64-android-ndk-libcpp.cmake`, + `cmake/toolchains/armv7-android-ndk-libcpp.cmake`, + `cmake/toolchains/x86-android-ndk-libcpp.cmake`, and + `cmake/toolchains/x86_64-android-ndk-libcpp.cmake` are deprecated, but + remain for compatibility. `cmake/toolchains/android.cmake` should be used + instead. + +- `cmake/toolchains/i386-ios.cmake` and `cmake/toolchains/x86_64-ios.cmake` + provide support for the iOS simulator. + +- `cmake/toolchains/android-ndk-common.cmake` and + `cmake/toolchains/arm-ios-common.cmake` are support files used by other + toolchain files. + +## Template Files + +These files are inputs to the CMake build and are used to generate inputs to the +build system output by CMake. + +- `cmake/draco-config.cmake.template` is used to produce + draco-config.cmake. draco-config.cmake can be used by CMake to find draco + when another CMake project depends on draco. + +- `cmake/draco.pc.template` is used to produce draco's pkg-config file. + Some build systems use pkg-config to configure include and library paths + when they depend upon third party libraries like draco. diff --git a/contrib/draco/CMakeLists.txt b/contrib/draco/CMakeLists.txt new file mode 100644 index 000000000..3da2c664a --- /dev/null +++ b/contrib/draco/CMakeLists.txt @@ -0,0 +1,958 @@ +cmake_minimum_required(VERSION 3.12 FATAL_ERROR) + +# Draco requires C++11. +set(CMAKE_CXX_STANDARD 11) +project(draco C CXX) + +if(NOT CMAKE_BUILD_TYPE) + set(CMAKE_BUILD_TYPE Release) +endif() + +set(draco_root "${CMAKE_CURRENT_SOURCE_DIR}") +set(draco_src_root "${draco_root}/src/draco") +set(draco_build "${CMAKE_BINARY_DIR}") + +if("${draco_root}" STREQUAL "${draco_build}") + message( + FATAL_ERROR "Building from within the Draco source tree is not supported.\n" + "Hint: Run these commands\n" + "$ rm -rf CMakeCache.txt CMakeFiles\n" + "$ mkdir -p ../draco_build\n" "$ cd ../draco_build\n" + "And re-run CMake from the draco_build directory.") +endif() + +include(CMakePackageConfigHelpers) +include(FindPythonInterp) +include("${draco_root}/cmake/draco_build_definitions.cmake") +include("${draco_root}/cmake/draco_cpu_detection.cmake") +include("${draco_root}/cmake/draco_emscripten.cmake") +include("${draco_root}/cmake/draco_flags.cmake") +include("${draco_root}/cmake/draco_helpers.cmake") +include("${draco_root}/cmake/draco_install.cmake") +include("${draco_root}/cmake/draco_intrinsics.cmake") +include("${draco_root}/cmake/draco_options.cmake") +include("${draco_root}/cmake/draco_sanitizer.cmake") +include("${draco_root}/cmake/draco_targets.cmake") +include("${draco_root}/cmake/draco_tests.cmake") +include("${draco_root}/cmake/draco_variables.cmake") + +# C++ and linker flags. +draco_track_configuration_variable(DRACO_CXX_FLAGS) +draco_track_configuration_variable(DRACO_EXE_LINKER_FLAGS) + +# Sanitizer integration. +draco_track_configuration_variable(DRACO_SANITIZE) + +# Generated source file directory. 
+draco_track_configuration_variable(DRACO_GENERATED_SOURCES_DIRECTORY) + +# Controls use of std::mutex and absl::Mutex in ThreadPool. +draco_track_configuration_variable(DRACO_THREADPOOL_USE_STD_MUTEX) + +if(DRACO_VERBOSE) + draco_dump_cmake_flag_variables() + draco_dump_tracked_configuration_variables() + draco_dump_options() +endif() + +# Compiler/linker flags must be lists, but come in from the environment as +# strings. Break them up: +if(NOT "${DRACO_CXX_FLAGS}" STREQUAL "") + separate_arguments(DRACO_CXX_FLAGS) +endif() +if(NOT "${DRACO_EXE_LINKER_FLAGS}" STREQUAL "") + separate_arguments(DRACO_EXE_LINKER_FLAGS) +endif() + +draco_reset_target_lists() +draco_setup_options() +draco_set_build_definitions() +draco_set_cxx_flags() +draco_generate_features_h() + +# Draco source file listing variables. +list(APPEND draco_attributes_sources + "${draco_src_root}/attributes/attribute_octahedron_transform.cc" + "${draco_src_root}/attributes/attribute_octahedron_transform.h" + "${draco_src_root}/attributes/attribute_quantization_transform.cc" + "${draco_src_root}/attributes/attribute_quantization_transform.h" + "${draco_src_root}/attributes/attribute_transform.cc" + "${draco_src_root}/attributes/attribute_transform.h" + "${draco_src_root}/attributes/attribute_transform_data.h" + "${draco_src_root}/attributes/attribute_transform_type.h" + "${draco_src_root}/attributes/geometry_attribute.cc" + "${draco_src_root}/attributes/geometry_attribute.h" + "${draco_src_root}/attributes/geometry_indices.h" + "${draco_src_root}/attributes/point_attribute.cc" + "${draco_src_root}/attributes/point_attribute.h") + +list( + APPEND + draco_compression_attributes_dec_sources + "${draco_src_root}/compression/attributes/attributes_decoder.cc" + "${draco_src_root}/compression/attributes/attributes_decoder.h" + "${draco_src_root}/compression/attributes/kd_tree_attributes_decoder.cc" + "${draco_src_root}/compression/attributes/kd_tree_attributes_decoder.h" + "${draco_src_root}/compression/attributes/kd_tree_attributes_shared.h" + "${draco_src_root}/compression/attributes/mesh_attribute_indices_encoding_data.h" + "${draco_src_root}/compression/attributes/normal_compression_utils.h" + "${draco_src_root}/compression/attributes/point_d_vector.h" + "${draco_src_root}/compression/attributes/sequential_attribute_decoder.cc" + "${draco_src_root}/compression/attributes/sequential_attribute_decoder.h" + "${draco_src_root}/compression/attributes/sequential_attribute_decoders_controller.cc" + "${draco_src_root}/compression/attributes/sequential_attribute_decoders_controller.h" + "${draco_src_root}/compression/attributes/sequential_integer_attribute_decoder.cc" + "${draco_src_root}/compression/attributes/sequential_integer_attribute_decoder.h" + "${draco_src_root}/compression/attributes/sequential_normal_attribute_decoder.cc" + "${draco_src_root}/compression/attributes/sequential_normal_attribute_decoder.h" + "${draco_src_root}/compression/attributes/sequential_quantization_attribute_decoder.cc" + "${draco_src_root}/compression/attributes/sequential_quantization_attribute_decoder.h" + ) + +list( + APPEND + draco_compression_attributes_enc_sources + "${draco_src_root}/compression/attributes/attributes_encoder.cc" + "${draco_src_root}/compression/attributes/attributes_encoder.h" + "${draco_src_root}/compression/attributes/kd_tree_attributes_encoder.cc" + "${draco_src_root}/compression/attributes/kd_tree_attributes_encoder.h" + "${draco_src_root}/compression/attributes/linear_sequencer.h" + 
"${draco_src_root}/compression/attributes/points_sequencer.h" + "${draco_src_root}/compression/attributes/sequential_attribute_encoder.cc" + "${draco_src_root}/compression/attributes/sequential_attribute_encoder.h" + "${draco_src_root}/compression/attributes/sequential_attribute_encoders_controller.cc" + "${draco_src_root}/compression/attributes/sequential_attribute_encoders_controller.h" + "${draco_src_root}/compression/attributes/sequential_integer_attribute_encoder.cc" + "${draco_src_root}/compression/attributes/sequential_integer_attribute_encoder.h" + "${draco_src_root}/compression/attributes/sequential_normal_attribute_encoder.cc" + "${draco_src_root}/compression/attributes/sequential_normal_attribute_encoder.h" + "${draco_src_root}/compression/attributes/sequential_quantization_attribute_encoder.cc" + "${draco_src_root}/compression/attributes/sequential_quantization_attribute_encoder.h" + ) + + +list( + APPEND + draco_compression_attributes_pred_schemes_dec_sources + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_decoder.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_shared.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_data.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_decoder.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_area.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_base.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_decoder.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_encoder.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_decoder.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_decoder.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_predictor.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_decoder.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_decoder_factory.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_decoder_interface.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_decoding_transform.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_delta_decoder.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_factory.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_interface.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_decoding_transform.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_base.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_decoding_transform.h" + 
"${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_base.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_wrap_decoding_transform.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_wrap_transform_base.h" + ) + +list( + APPEND + draco_compression_attributes_pred_schemes_enc_sources + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_encoder.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_shared.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_data.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_encoder.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_area.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_base.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_encoder.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_encoder.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_encoder.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_encoder.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_predictor.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_delta_encoder.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_encoder.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.cc" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_encoder_interface.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_encoding_transform.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_factory.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_interface.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_encoding_transform.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_base.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_encoding_transform.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_base.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_wrap_encoding_transform.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_wrap_transform_base.h" + ) + +list( + APPEND + draco_compression_bit_coders_sources + "${draco_src_root}/compression/bit_coders/adaptive_rans_bit_coding_shared.h" + 
"${draco_src_root}/compression/bit_coders/adaptive_rans_bit_decoder.cc" + "${draco_src_root}/compression/bit_coders/adaptive_rans_bit_decoder.h" + "${draco_src_root}/compression/bit_coders/adaptive_rans_bit_encoder.cc" + "${draco_src_root}/compression/bit_coders/adaptive_rans_bit_encoder.h" + "${draco_src_root}/compression/bit_coders/direct_bit_decoder.cc" + "${draco_src_root}/compression/bit_coders/direct_bit_decoder.h" + "${draco_src_root}/compression/bit_coders/direct_bit_encoder.cc" + "${draco_src_root}/compression/bit_coders/direct_bit_encoder.h" + "${draco_src_root}/compression/bit_coders/folded_integer_bit_decoder.h" + "${draco_src_root}/compression/bit_coders/folded_integer_bit_encoder.h" + "${draco_src_root}/compression/bit_coders/rans_bit_decoder.cc" + "${draco_src_root}/compression/bit_coders/rans_bit_decoder.h" + "${draco_src_root}/compression/bit_coders/rans_bit_encoder.cc" + "${draco_src_root}/compression/bit_coders/rans_bit_encoder.h" + "${draco_src_root}/compression/bit_coders/symbol_bit_decoder.cc" + "${draco_src_root}/compression/bit_coders/symbol_bit_decoder.h" + "${draco_src_root}/compression/bit_coders/symbol_bit_encoder.cc" + "${draco_src_root}/compression/bit_coders/symbol_bit_encoder.h") + +list(APPEND draco_enc_config_sources + "${draco_src_root}/compression/config/compression_shared.h" + "${draco_src_root}/compression/config/draco_options.h" + "${draco_src_root}/compression/config/encoder_options.h" + "${draco_src_root}/compression/config/encoding_features.h") + +list(APPEND draco_dec_config_sources + "${draco_src_root}/compression/config/compression_shared.h" + "${draco_src_root}/compression/config/decoder_options.h" + "${draco_src_root}/compression/config/draco_options.h") + +list(APPEND draco_compression_decode_sources + "${draco_src_root}/compression/decode.cc" + "${draco_src_root}/compression/decode.h") + +list(APPEND draco_compression_encode_sources + "${draco_src_root}/compression/encode.cc" + "${draco_src_root}/compression/encode.h" + "${draco_src_root}/compression/encode_base.h" + "${draco_src_root}/compression/expert_encode.cc" + "${draco_src_root}/compression/expert_encode.h") + +list( + APPEND + draco_compression_mesh_traverser_sources + "${draco_src_root}/compression/mesh/traverser/depth_first_traverser.h" + "${draco_src_root}/compression/mesh/traverser/max_prediction_degree_traverser.h" + "${draco_src_root}/compression/mesh/traverser/mesh_attribute_indices_encoding_observer.h" + "${draco_src_root}/compression/mesh/traverser/mesh_traversal_sequencer.h" + "${draco_src_root}/compression/mesh/traverser/traverser_base.h") + +list( + APPEND + draco_compression_mesh_dec_sources + "${draco_src_root}/compression/mesh/mesh_decoder.cc" + "${draco_src_root}/compression/mesh/mesh_decoder.h" + "${draco_src_root}/compression/mesh/mesh_edgebreaker_decoder.cc" + "${draco_src_root}/compression/mesh/mesh_edgebreaker_decoder.h" + "${draco_src_root}/compression/mesh/mesh_edgebreaker_decoder_impl.cc" + "${draco_src_root}/compression/mesh/mesh_edgebreaker_decoder_impl.h" + "${draco_src_root}/compression/mesh/mesh_edgebreaker_decoder_impl_interface.h" + "${draco_src_root}/compression/mesh/mesh_edgebreaker_shared.h" + "${draco_src_root}/compression/mesh/mesh_edgebreaker_traversal_decoder.h" + "${draco_src_root}/compression/mesh/mesh_edgebreaker_traversal_predictive_decoder.h" + "${draco_src_root}/compression/mesh/mesh_edgebreaker_traversal_valence_decoder.h" + "${draco_src_root}/compression/mesh/mesh_sequential_decoder.cc" + 
"${draco_src_root}/compression/mesh/mesh_sequential_decoder.h") + +list( + APPEND + draco_compression_mesh_enc_sources + "${draco_src_root}/compression/mesh/mesh_edgebreaker_encoder.cc" + "${draco_src_root}/compression/mesh/mesh_edgebreaker_encoder.h" + "${draco_src_root}/compression/mesh/mesh_edgebreaker_encoder_impl.cc" + "${draco_src_root}/compression/mesh/mesh_edgebreaker_encoder_impl.h" + "${draco_src_root}/compression/mesh/mesh_edgebreaker_encoder_impl_interface.h" + "${draco_src_root}/compression/mesh/mesh_edgebreaker_shared.h" + "${draco_src_root}/compression/mesh/mesh_edgebreaker_traversal_encoder.h" + "${draco_src_root}/compression/mesh/mesh_edgebreaker_traversal_predictive_encoder.h" + "${draco_src_root}/compression/mesh/mesh_edgebreaker_traversal_valence_encoder.h" + "${draco_src_root}/compression/mesh/mesh_encoder.cc" + "${draco_src_root}/compression/mesh/mesh_encoder.h" + "${draco_src_root}/compression/mesh/mesh_sequential_encoder.cc" + "${draco_src_root}/compression/mesh/mesh_sequential_encoder.h") + +list( + APPEND + draco_compression_point_cloud_dec_sources + "${draco_src_root}/compression/point_cloud/point_cloud_decoder.cc" + "${draco_src_root}/compression/point_cloud/point_cloud_decoder.h" + "${draco_src_root}/compression/point_cloud/point_cloud_kd_tree_decoder.cc" + "${draco_src_root}/compression/point_cloud/point_cloud_kd_tree_decoder.h" + "${draco_src_root}/compression/point_cloud/point_cloud_sequential_decoder.cc" + "${draco_src_root}/compression/point_cloud/point_cloud_sequential_decoder.h" + ) + +list( + APPEND + draco_compression_point_cloud_enc_sources + "${draco_src_root}/compression/point_cloud/point_cloud_encoder.cc" + "${draco_src_root}/compression/point_cloud/point_cloud_encoder.h" + "${draco_src_root}/compression/point_cloud/point_cloud_kd_tree_encoder.cc" + "${draco_src_root}/compression/point_cloud/point_cloud_kd_tree_encoder.h" + "${draco_src_root}/compression/point_cloud/point_cloud_sequential_encoder.cc" + "${draco_src_root}/compression/point_cloud/point_cloud_sequential_encoder.h" + ) + +list(APPEND draco_compression_entropy_sources + "${draco_src_root}/compression/entropy/ans.h" + "${draco_src_root}/compression/entropy/rans_symbol_coding.h" + "${draco_src_root}/compression/entropy/rans_symbol_decoder.h" + "${draco_src_root}/compression/entropy/rans_symbol_encoder.h" + "${draco_src_root}/compression/entropy/shannon_entropy.cc" + "${draco_src_root}/compression/entropy/shannon_entropy.h" + "${draco_src_root}/compression/entropy/symbol_decoding.cc" + "${draco_src_root}/compression/entropy/symbol_decoding.h" + "${draco_src_root}/compression/entropy/symbol_encoding.cc" + "${draco_src_root}/compression/entropy/symbol_encoding.h") + +list(APPEND draco_core_sources + "${draco_src_root}/core/bit_utils.cc" + "${draco_src_root}/core/bit_utils.h" + "${draco_src_root}/core/bounding_box.cc" + "${draco_src_root}/core/bounding_box.h" + "${draco_src_root}/core/cycle_timer.cc" + "${draco_src_root}/core/cycle_timer.h" + "${draco_src_root}/core/data_buffer.cc" + "${draco_src_root}/core/data_buffer.h" + "${draco_src_root}/core/decoder_buffer.cc" + "${draco_src_root}/core/decoder_buffer.h" + "${draco_src_root}/core/divide.cc" + "${draco_src_root}/core/divide.h" + "${draco_src_root}/core/draco_index_type.h" + "${draco_src_root}/core/draco_index_type_vector.h" + "${draco_src_root}/core/draco_types.cc" + "${draco_src_root}/core/draco_types.h" + "${draco_src_root}/core/encoder_buffer.cc" + "${draco_src_root}/core/encoder_buffer.h" + "${draco_src_root}/core/hash_utils.cc" + 
"${draco_src_root}/core/hash_utils.h" + "${draco_src_root}/core/macros.h" + "${draco_src_root}/core/math_utils.h" + "${draco_src_root}/core/options.cc" + "${draco_src_root}/core/options.h" + "${draco_src_root}/core/quantization_utils.cc" + "${draco_src_root}/core/quantization_utils.h" + "${draco_src_root}/core/status.h" + "${draco_src_root}/core/status_or.h" + "${draco_src_root}/core/varint_decoding.h" + "${draco_src_root}/core/varint_encoding.h" + "${draco_src_root}/core/vector_d.h") + +list(APPEND draco_io_sources + "${draco_src_root}/io/file_reader_factory.cc" + "${draco_src_root}/io/file_reader_factory.h" + "${draco_src_root}/io/file_reader_interface.h" + "${draco_src_root}/io/file_utils.cc" + "${draco_src_root}/io/file_utils.h" + "${draco_src_root}/io/file_writer_factory.cc" + "${draco_src_root}/io/file_writer_factory.h" + "${draco_src_root}/io/file_writer_interface.h" + "${draco_src_root}/io/file_writer_utils.h" + "${draco_src_root}/io/file_writer_utils.cc" + "${draco_src_root}/io/mesh_io.cc" + "${draco_src_root}/io/mesh_io.h" + "${draco_src_root}/io/obj_decoder.cc" + "${draco_src_root}/io/obj_decoder.h" + "${draco_src_root}/io/obj_encoder.cc" + "${draco_src_root}/io/obj_encoder.h" + "${draco_src_root}/io/parser_utils.cc" + "${draco_src_root}/io/parser_utils.h" + "${draco_src_root}/io/ply_decoder.cc" + "${draco_src_root}/io/ply_decoder.h" + "${draco_src_root}/io/ply_encoder.cc" + "${draco_src_root}/io/ply_encoder.h" + "${draco_src_root}/io/ply_property_reader.h" + "${draco_src_root}/io/ply_property_writer.h" + "${draco_src_root}/io/ply_reader.cc" + "${draco_src_root}/io/ply_reader.h" + "${draco_src_root}/io/point_cloud_io.cc" + "${draco_src_root}/io/point_cloud_io.h" + "${draco_src_root}/io/stdio_file_reader.cc" + "${draco_src_root}/io/stdio_file_reader.h" + "${draco_src_root}/io/stdio_file_writer.cc" + "${draco_src_root}/io/stdio_file_writer.h") + +list(APPEND draco_mesh_sources + "${draco_src_root}/mesh/corner_table.cc" + "${draco_src_root}/mesh/corner_table.h" + "${draco_src_root}/mesh/corner_table_iterators.h" + "${draco_src_root}/mesh/mesh.cc" + "${draco_src_root}/mesh/mesh.h" + "${draco_src_root}/mesh/mesh_are_equivalent.cc" + "${draco_src_root}/mesh/mesh_are_equivalent.h" + "${draco_src_root}/mesh/mesh_attribute_corner_table.cc" + "${draco_src_root}/mesh/mesh_attribute_corner_table.h" + "${draco_src_root}/mesh/mesh_cleanup.cc" + "${draco_src_root}/mesh/mesh_cleanup.h" + "${draco_src_root}/mesh/mesh_misc_functions.cc" + "${draco_src_root}/mesh/mesh_misc_functions.h" + "${draco_src_root}/mesh/mesh_stripifier.cc" + "${draco_src_root}/mesh/mesh_stripifier.h" + "${draco_src_root}/mesh/triangle_soup_mesh_builder.cc" + "${draco_src_root}/mesh/triangle_soup_mesh_builder.h" + "${draco_src_root}/mesh/valence_cache.h") + +list(APPEND draco_point_cloud_sources + "${draco_src_root}/point_cloud/point_cloud.cc" + "${draco_src_root}/point_cloud/point_cloud.h" + "${draco_src_root}/point_cloud/point_cloud_builder.cc" + "${draco_src_root}/point_cloud/point_cloud_builder.h") + +list( + APPEND + draco_points_common_sources + "${draco_src_root}/compression/point_cloud/algorithms/point_cloud_compression_method.h" + "${draco_src_root}/compression/point_cloud/algorithms/point_cloud_types.h" + "${draco_src_root}/compression/point_cloud/algorithms/quantize_points_3.h" + "${draco_src_root}/compression/point_cloud/algorithms/queuing_policy.h") + +list( + APPEND + draco_points_dec_sources + "${draco_src_root}/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_decoder.cc" + 
"${draco_src_root}/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_decoder.h" + "${draco_src_root}/compression/point_cloud/algorithms/float_points_tree_decoder.cc" + "${draco_src_root}/compression/point_cloud/algorithms/float_points_tree_decoder.h" + ) + +list( + APPEND + draco_points_enc_sources + "${draco_src_root}/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_encoder.cc" + "${draco_src_root}/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_encoder.h" + "${draco_src_root}/compression/point_cloud/algorithms/float_points_tree_encoder.cc" + "${draco_src_root}/compression/point_cloud/algorithms/float_points_tree_encoder.h" + ) + +list(APPEND draco_metadata_sources + "${draco_src_root}/metadata/geometry_metadata.cc" + "${draco_src_root}/metadata/geometry_metadata.h" + "${draco_src_root}/metadata/metadata.cc" + "${draco_src_root}/metadata/metadata.h") + +list(APPEND draco_metadata_enc_sources + "${draco_src_root}/metadata/metadata_encoder.cc" + "${draco_src_root}/metadata/metadata_encoder.h") + +list(APPEND draco_metadata_dec_sources + "${draco_src_root}/metadata/metadata_decoder.cc" + "${draco_src_root}/metadata/metadata_decoder.h") + +list(APPEND draco_animation_sources + "${draco_src_root}/animation/keyframe_animation.cc" + "${draco_src_root}/animation/keyframe_animation.h") + +list(APPEND draco_animation_enc_sources + "${draco_src_root}/animation/keyframe_animation_encoder.cc" + "${draco_src_root}/animation/keyframe_animation_encoder.h") + +list(APPEND draco_animation_dec_sources + "${draco_src_root}/animation/keyframe_animation_decoder.cc" + "${draco_src_root}/animation/keyframe_animation_decoder.h") + +list( + APPEND draco_js_dec_sources + "${draco_src_root}/javascript/emscripten/decoder_webidl_wrapper.cc" + "${draco_src_root}/javascript/emscripten/draco_decoder_glue_wrapper.cc" + ) + +list( + APPEND draco_js_enc_sources + "${draco_src_root}/javascript/emscripten/draco_encoder_glue_wrapper.cc" + "${draco_src_root}/javascript/emscripten/encoder_webidl_wrapper.cc") + +list( + APPEND + draco_animation_js_dec_sources + "${draco_src_root}/javascript/emscripten/animation_decoder_webidl_wrapper.cc" + "${draco_src_root}/javascript/emscripten/draco_animation_decoder_glue_wrapper.cc" + ) + +list( + APPEND + draco_animation_js_enc_sources + "${draco_src_root}/javascript/emscripten/animation_encoder_webidl_wrapper.cc" + "${draco_src_root}/javascript/emscripten/draco_animation_encoder_glue_wrapper.cc" + ) + +list(APPEND draco_unity_plug_sources + "${draco_src_root}/unity/draco_unity_plugin.cc" + "${draco_src_root}/unity/draco_unity_plugin.h") + +list(APPEND draco_maya_plug_sources + "${draco_src_root}/maya/draco_maya_plugin.cc" + "${draco_src_root}/maya/draco_maya_plugin.h") + +# +# Draco targets. +# +if(EMSCRIPTEN AND DRACO_JS_GLUE) + # Draco decoder and encoder "executable" targets in various flavors for + # Emsscripten. 
+ list(APPEND draco_decoder_src + ${draco_attributes_sources} + ${draco_compression_attributes_dec_sources} + ${draco_compression_attributes_pred_schemes_dec_sources} + ${draco_compression_bit_coders_sources} + ${draco_compression_decode_sources} + ${draco_compression_entropy_sources} + ${draco_compression_mesh_traverser_sources} + ${draco_compression_mesh_dec_sources} + ${draco_compression_point_cloud_dec_sources} + ${draco_core_sources} + ${draco_dec_config_sources} + ${draco_js_dec_sources} + ${draco_mesh_sources} + ${draco_metadata_dec_sources} + ${draco_metadata_sources} + ${draco_point_cloud_sources} + ${draco_points_dec_sources}) + + list(APPEND draco_encoder_src + ${draco_attributes_sources} + ${draco_compression_attributes_enc_sources} + ${draco_compression_attributes_pred_schemes_enc_sources} + ${draco_compression_bit_coders_sources} + ${draco_compression_encode_sources} + ${draco_compression_entropy_sources} + ${draco_compression_mesh_traverser_sources} + ${draco_compression_mesh_enc_sources} + ${draco_compression_point_cloud_enc_sources} + ${draco_core_sources} + ${draco_enc_config_sources} + ${draco_js_enc_sources} + ${draco_mesh_sources} + ${draco_metadata_enc_sources} + ${draco_metadata_sources} + ${draco_point_cloud_sources} + ${draco_points_enc_sources}) + + list(APPEND draco_js_dec_idl + "${draco_src_root}/javascript/emscripten/draco_web_decoder.idl") + list(APPEND draco_js_enc_idl + "${draco_src_root}/javascript/emscripten/draco_web_encoder.idl") + list( + APPEND + draco_animation_js_dec_idl + "${draco_src_root}/javascript/emscripten/draco_animation_web_decoder.idl") + list( + APPEND + draco_animation_js_enc_idl + "${draco_src_root}/javascript/emscripten/draco_animation_web_encoder.idl") + list(APPEND draco_pre_link_js_sources + "${draco_src_root}/javascript/emscripten/prepareCallbacks.js" + "${draco_src_root}/javascript/emscripten/version.js") + list(APPEND draco_post_link_js_sources + "${draco_src_root}/javascript/emscripten/finalize.js") + list(APPEND draco_post_link_js_decoder_sources ${draco_post_link_js_sources} + "${draco_src_root}/javascript/emscripten/decoder_functions.js") + + set(draco_decoder_glue_path "${draco_build}/glue_decoder") + set(draco_encoder_glue_path "${draco_build}/glue_encoder") + + draco_generate_emscripten_glue(INPUT_IDL ${draco_js_dec_idl} OUTPUT_PATH + ${draco_decoder_glue_path}) + draco_generate_emscripten_glue(INPUT_IDL ${draco_js_enc_idl} OUTPUT_PATH + ${draco_encoder_glue_path}) + + if(DRACO_DECODER_ATTRIBUTE_DEDUPLICATION) + list(APPEND draco_decoder_features + "DRACO_ATTRIBUTE_INDICES_DEDUPLICATION_SUPPORTED" + "DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED") + endif() + + draco_add_emscripten_executable(NAME + draco_decoder + SOURCES + ${draco_decoder_src} + DEFINES + ${draco_defines} + FEATURES + ${draco_decoder_features} + INCLUDES + ${draco_include_paths} + LINK_FLAGS + "-sEXPORT_NAME=\"DracoDecoderModule\"" + GLUE_PATH + ${draco_decoder_glue_path} + PRE_LINK_JS_SOURCES + ${draco_pre_link_js_sources} + POST_LINK_JS_SOURCES + ${draco_post_link_js_decoder_sources}) + + draco_add_emscripten_executable( + NAME + draco_encoder + SOURCES + ${draco_encoder_src} + DEFINES + ${draco_defines} + FEATURES + DRACO_ATTRIBUTE_INDICES_DEDUPLICATION_SUPPORTED + DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED + INCLUDES + ${draco_include_paths} + LINK_FLAGS + "-sEXPORT_NAME=\"DracoEncoderModule\"" + GLUE_PATH + ${draco_encoder_glue_path} + PRE_LINK_JS_SOURCES + ${draco_pre_link_js_sources} + POST_LINK_JS_SOURCES + ${draco_post_link_js_sources}) + + 
if(DRACO_ANIMATION_ENCODING) + set(draco_anim_decoder_glue_path "${draco_build}/glue_animation_decoder") + set(draco_anim_encoder_glue_path "${draco_build}/glue_animation_encoder") + + draco_generate_emscripten_glue(INPUT_IDL ${draco_animation_js_dec_idl} + OUTPUT_PATH ${draco_anim_decoder_glue_path}) + draco_generate_emscripten_glue(INPUT_IDL ${draco_animation_js_enc_idl} + OUTPUT_PATH ${draco_anim_encoder_glue_path}) + + draco_add_emscripten_executable( + NAME + draco_animation_decoder + SOURCES + ${draco_animation_dec_sources} + ${draco_animation_js_dec_sources} + ${draco_animation_sources} + ${draco_decoder_src} + DEFINES + ${draco_defines} + INCLUDES + ${draco_include_paths} + LINK_FLAGS + "-sEXPORT_NAME=\"DracoAnimationDecoderModule\"" + GLUE_PATH + ${draco_anim_decoder_glue_path} + PRE_LINK_JS_SOURCES + ${draco_pre_link_js_sources} + POST_LINK_JS_SOURCES + ${draco_post_link_js_decoder_sources}) + + draco_add_emscripten_executable( + NAME + draco_animation_encoder + SOURCES + ${draco_animation_enc_sources} + ${draco_animation_js_enc_sources} + ${draco_animation_sources} + ${draco_encoder_src} + DEFINES + ${draco_defines} + INCLUDES + ${draco_include_paths} + LINK_FLAGS + "-sEXPORT_NAME=\"DracoAnimationEncoderModule\"" + GLUE_PATH + ${draco_anim_encoder_glue_path} + PRE_LINK_JS_SOURCES + ${draco_pre_link_js_sources} + POST_LINK_JS_SOURCES + ${draco_post_link_js_sources}) + endif() +else() + # Standard Draco libs, encoder and decoder. Object collections that mirror the + # Draco directory structure. + draco_add_library(NAME draco_attributes TYPE OBJECT SOURCES + ${draco_attributes_sources} DEFINES ${draco_defines} + INCLUDES ${draco_include_paths}) + draco_add_library(NAME + draco_compression_attributes_dec + OBJECT + ${draco_compression_attributes_dec_sources} + TYPE + OBJECT + SOURCES + ${draco_compression_attributes_dec_sources} + DEFINES + ${draco_defines} + INCLUDES + ${draco_include_paths}) + draco_add_library(NAME draco_compression_attributes_enc TYPE OBJECT SOURCES + ${draco_compression_attributes_enc_sources} DEFINES + ${draco_defines} INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_compression_attributes_pred_schemes_dec TYPE + OBJECT SOURCES + ${draco_compression_attributes_pred_schemes_dec_sources}) + draco_add_library(NAME draco_compression_attributes_pred_schemes_enc TYPE + OBJECT SOURCES + ${draco_compression_attributes_pred_schemes_enc_sources} + DEFINES ${draco_defines} INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_compression_bit_coders TYPE OBJECT SOURCES + ${draco_compression_bit_coders_sources} DEFINES + ${draco_defines} INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_enc_config TYPE OBJECT SOURCES + ${draco_enc_config_sources} DEFINES ${draco_defines} + INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_dec_config TYPE OBJECT SOURCES + ${draco_dec_config_sources} DEFINES ${draco_defines} + INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_compression_decode TYPE OBJECT SOURCES + ${draco_compression_decode_sources} DEFINES ${draco_defines} + INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_compression_encode TYPE OBJECT SOURCES + ${draco_compression_encode_sources} DEFINES ${draco_defines} + INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_compression_entropy TYPE OBJECT SOURCES + ${draco_compression_entropy_sources} DEFINES + ${draco_defines} INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_compression_mesh_traverser TYPE OBJECT SOURCES + 
${draco_compression_mesh_traverser_sources} DEFINES + ${draco_defines} INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_compression_mesh_dec TYPE OBJECT SOURCES + ${draco_compression_mesh_dec_sources} DEFINES + ${draco_defines} INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_compression_mesh_enc TYPE OBJECT SOURCES + ${draco_compression_mesh_enc_sources} DEFINES + ${draco_defines} INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_compression_point_cloud_dec TYPE OBJECT SOURCES + ${draco_compression_point_cloud_dec_sources} DEFINES + ${draco_defines} INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_compression_point_cloud_enc TYPE OBJECT SOURCES + ${draco_compression_point_cloud_enc_sources} DEFINES + ${draco_defines} INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_core TYPE OBJECT SOURCES ${draco_core_sources} + DEFINES ${draco_defines} INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_io TYPE OBJECT SOURCES ${draco_io_sources} + DEFINES ${draco_defines} INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_mesh TYPE OBJECT SOURCES ${draco_mesh_sources} + DEFINES ${draco_defines} INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_metadata_dec TYPE OBJECT SOURCES + ${draco_metadata_dec_sources} DEFINES ${draco_defines} + INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_metadata_enc TYPE OBJECT SOURCES + ${draco_metadata_enc_sources} DEFINES ${draco_defines} + INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_metadata TYPE OBJECT SOURCES + ${draco_metadata_sources} DEFINES ${draco_defines} INCLUDES + ${draco_include_paths}) + draco_add_library(NAME draco_animation_dec TYPE OBJECT SOURCES + ${draco_animation_dec_sources} DEFINES ${draco_defines} + INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_animation_enc TYPE OBJECT SOURCES + ${draco_animation_enc_sources} DEFINES ${draco_defines} + INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_animation TYPE OBJECT SOURCES + ${draco_animation_sources} DEFINES ${draco_defines} INCLUDES + ${draco_include_paths}) + draco_add_library(NAME draco_point_cloud TYPE OBJECT SOURCES + ${draco_point_cloud_sources} DEFINES ${draco_defines} + INCLUDES ${draco_include_paths}) + draco_add_library(NAME + draco_points_dec + TYPE + OBJECT + SOURCES + ${draco_points_common_sources} + ${draco_points_dec_sources} + DEFINES + ${draco_defines} + INCLUDES + ${draco_include_paths}) + draco_add_library(NAME + draco_points_enc + TYPE + OBJECT + SOURCES + ${draco_points_common_sources} + ${draco_points_enc_sources} + DEFINES + ${draco_defines} + INCLUDES + ${draco_include_paths}) + + set(draco_object_library_deps + draco_attributes + draco_compression_attributes_dec + draco_compression_attributes_enc + draco_compression_attributes_pred_schemes_dec + draco_compression_attributes_pred_schemes_enc + draco_compression_bit_coders + draco_compression_decode + draco_compression_encode + draco_compression_entropy + draco_compression_mesh_dec + draco_compression_mesh_enc + draco_compression_point_cloud_dec + draco_compression_point_cloud_enc + draco_core + draco_dec_config + draco_enc_config + draco_io + draco_mesh + draco_metadata + draco_metadata_dec + draco_metadata_enc + draco_animation + draco_animation_dec + draco_animation_enc + draco_point_cloud + draco_points_dec + draco_points_enc) + + # Library targets that consume the object collections. 
+ if(MSVC OR WIN32) + # In order to produce a DLL and import library the Windows tools require + # that the exported symbols are part of the DLL target. The unfortunate side + # effect of this is that a single configuration cannot output both the + # static library and the DLL: This results in an either/or situation. + # Windows users of the draco build can have a DLL and an import library, + # or they can have a static library; they cannot have both from a single + # configuration of the build. + if(BUILD_SHARED_LIBS) + set(draco_lib_type SHARED) + else() + set(draco_lib_type STATIC) + endif() + draco_add_library(NAME + draco + OUTPUT_NAME + draco + TYPE + ${draco_lib_type} + DEFINES + ${draco_defines} + INCLUDES + ${draco_include_paths} + OBJLIB_DEPS + ${draco_object_library_deps}) + + else() + draco_add_library(NAME + draco_static + OUTPUT_NAME + draco + TYPE + STATIC + DEFINES + ${draco_defines} + INCLUDES + ${draco_include_paths} + OBJLIB_DEPS + ${draco_object_library_deps}) + + if(BUILD_SHARED_LIBS) + draco_add_library(NAME + draco_shared + SOURCES + "${draco_src_root}/core/draco_version.h" + OUTPUT_NAME + draco + TYPE + SHARED + DEFINES + ${draco_defines} + INCLUDES + ${draco_include_paths} + LIB_DEPS + draco_static) + endif() + endif() + + if(DRACO_UNITY_PLUGIN) + if(IOS) + set(unity_decoder_lib_type STATIC) + else() + set(unity_decoder_lib_type MODULE) + endif() + + draco_add_library(NAME draco_unity_plugin TYPE OBJECT SOURCES + ${draco_unity_plug_sources} DEFINES ${draco_defines} + INCLUDES ${draco_include_paths}) + + draco_add_library(NAME + dracodec_unity + TYPE + ${unity_decoder_lib_type} + DEFINES + ${draco_defines} + INCLUDES + ${draco_include_paths} + OBJLIB_DEPS + draco_unity_plugin + LIB_DEPS + ${draco_plugin_dependency}) + + # For Mac, we need to build a .bundle for the unity plugin. + if(APPLE) + set_target_properties(dracodec_unity PROPERTIES BUNDLE true) + elseif(NOT unity_decoder_lib_type STREQUAL STATIC) + set_target_properties(dracodec_unity + PROPERTIES SOVERSION ${DRACO_SOVERSION}) + endif() + endif() + + if(DRACO_MAYA_PLUGIN) + draco_add_library(NAME draco_maya_plugin TYPE OBJECT SOURCES + ${draco_maya_plug_sources} DEFINES ${draco_defines} + INCLUDES ${draco_include_paths}) + + draco_add_library(NAME + draco_maya_wrapper + TYPE + MODULE + DEFINES + ${draco_defines} + INCLUDES + ${draco_include_paths} + OBJLIB_DEPS + draco_maya_plugin + LIB_DEPS + ${draco_plugin_dependency}) + + # For Mac, we need to build a .bundle for the plugin. + if(APPLE) + set_target_properties(draco_maya_wrapper PROPERTIES BUNDLE true) + else() + set_target_properties(draco_maya_wrapper + PROPERTIES SOVERSION ${DRACO_SOVERSION}) + endif() + endif() + + # Draco app targets. 
+ draco_add_executable(NAME + draco_decoder + SOURCES + "${draco_src_root}/tools/draco_decoder.cc" + ${draco_io_sources} + DEFINES + ${draco_defines} + INCLUDES + ${draco_include_paths} + LIB_DEPS + ${draco_dependency}) + + draco_add_executable(NAME + draco_encoder + SOURCES + "${draco_src_root}/tools/draco_encoder.cc" + ${draco_io_sources} + DEFINES + ${draco_defines} + INCLUDES + ${draco_include_paths} + LIB_DEPS + ${draco_dependency}) + + draco_setup_install_target() + draco_setup_test_targets() +endif() + +if(DRACO_VERBOSE) + draco_dump_cmake_flag_variables() + draco_dump_tracked_configuration_variables() + draco_dump_options() +endif() diff --git a/contrib/draco/CONTRIBUTING.md b/contrib/draco/CONTRIBUTING.md new file mode 100644 index 000000000..b7bab3447 --- /dev/null +++ b/contrib/draco/CONTRIBUTING.md @@ -0,0 +1,27 @@ +Want to contribute? Great! First, read this page (including the small print at the end). + +### Before you contribute +Before we can use your code, you must sign the +[Google Individual Contributor License Agreement](https://cla.developers.google.com/about/google-individual) +(CLA), which you can do online. The CLA is necessary mainly because you own the +copyright to your changes, even after your contribution becomes part of our +codebase, so we need your permission to use and distribute your code. We also +need to be sure of various other things—for instance that you'll tell us if you +know that your code infringes on other people's patents. You don't have to sign +the CLA until after you've submitted your code for review and a member has +approved it, but you must do it before we can put your code into our codebase. +Before you start working on a larger contribution, you should get in touch with +us first through the issue tracker with your idea so that we can help out and +possibly guide you. Coordinating up front makes it much easier to avoid +frustration later on. + +### Code reviews +All submissions, including submissions by project members, require review. We +use GitHub pull requests for this purpose. +Please make sure that your code conforms with our +[coding style guidelines](https://google.github.io/styleguide/cppguide.html). + +### The small print +Contributions made by corporations are covered by a different agreement than +the one above, the +[Software Grant and Corporate Contributor License Agreement](https://cla.developers.google.com/about/google-corporate). diff --git a/contrib/draco/LICENSE b/contrib/draco/LICENSE new file mode 100644 index 000000000..301095454 --- /dev/null +++ b/contrib/draco/LICENSE @@ -0,0 +1,252 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +-------------------------------------------------------------------------------- +Files: docs/assets/js/ASCIIMathML.js + +Copyright (c) 2014 Peter Jipsen and other ASCIIMathML.js contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +-------------------------------------------------------------------------------- +Files: docs/assets/css/pygments/* + +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. + +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +For more information, please refer to diff --git a/contrib/draco/README.md b/contrib/draco/README.md new file mode 100644 index 000000000..add66edcb --- /dev/null +++ b/contrib/draco/README.md @@ -0,0 +1,478 @@ +


+ +![Build Status: master](https://travis-ci.org/google/draco.svg?branch=master) + +News +======= +### Version 1.4.1 release +* Using the versioned gstatic.com WASM and Javascript decoders is now + recommended. To use v1.4.1, use this URL: + * https://www.gstatic.com/draco/versioned/decoders/1.4.1/* + * Replace the * with the files to load. E.g. + * https://gstatic.com/draco/versioned/decoders/1.4.1/draco_decoder.js + * This works with the v1.3.6 and v1.4.0 releases, and will work with future + Draco releases. +* Bug fixes + +### Version 1.4.0 release +* WASM and JavaScript decoders are hosted from a static URL. + * It is recommended to always pull your Draco WASM and JavaScript decoders from this URL: + * https://www.gstatic.com/draco/v1/decoders/* + * Replace * with the files to load. E.g. + * https://www.gstatic.com/draco/v1/decoders/draco_decoder_gltf.wasm + * Users will benefit from having the Draco decoder in cache as more sites start using the static URL +* Changed npm modules to use WASM, which increased performance by ~200%. +* Updated Emscripten to 2.0. + * This causes the Draco codec modules to return a promise instead of the module directly. + * Please see the example code on how to handle the promise. +* Changed NORMAL quantization default to 8. +* Added new array API to decoder and deprecated DecoderBuffer. + * See PR https://github.com/google/draco/issues/513 for more information. +* Changed WASM/JavaScript behavior of catching exceptions. + * See issue https://github.com/google/draco/issues/629 for more information. +* Code cleanup. +* Emscripten builds now disable NODEJS_CATCH_EXIT and NODEJS_CATCH_REJECTION. + * Authors of a CLI tool might want to add their own error handlers. +* Added Maya plugin builds. +* Unity plugin builds updated. + * Builds are now stored as archives. + * Added iOS build. + * Unity users may want to look into https://github.com/atteneder/DracoUnity. +* Bug fixes. + +### Version 1.3.6 release +* WASM and JavaScript decoders are now hosted from a static URL + * It is recommended to always pull your Draco WASM and JavaScript decoders from this URL: + * https://www.gstatic.com/draco/v1/decoders/* + * Replace * with the files to load. E.g. 
+ * https://www.gstatic.com/draco/v1/decoders/draco_decoder_gltf.wasm + * Users will benefit from having the Draco decoder in cache as more sites start using the static URL +* Changed web examples to pull Draco decoders from static URL +* Added new API to Draco WASM decoder, which increased performance by ~15% +* Decreased Draco WASM decoder size by ~20% +* Added support for generic and multiple attributes to Draco Unity plug-ins +* Added new API to Draco Unity, which increased decoder performance by ~15% +* Changed quantization defaults: + * POSITION: 11 + * NORMAL: 7 + * TEX_COORD: 10 + * COLOR: 8 + * GENERIC: 8 +* Code cleanup +* Bug fixes + +### Version 1.3.5 release +* Added option to build Draco for Universal Scene Description +* Code cleanup +* Bug fixes + +### Version 1.3.4 release +* Released Draco Animation code +* Fixes for Unity +* Various file location and name changes + +### Version 1.3.3 release +* Added ExpertEncoder to the Javascript API + * Allows developers to set quantization options per attribute id +* Bug fixes + +### Version 1.3.2 release +* Bug fixes + +### Version 1.3.1 release +* Fix issue with multiple attributes when skipping an attribute transform + +### Version 1.3.0 release +* Improved kD-tree based point cloud encoding + * Now applicable to point clouds with any number of attributes + * Support for all integer attribute types and quantized floating point types +* Improved mesh compression up to 10% (on average ~2%) + * For meshes, the 1.3.0 bitstream is fully compatible with 1.2.x decoders +* Improved Javascript API + * Added support for all signed and unsigned integer types + * Added support for point clouds to our Javascript encoder API +* Added support for integer properties to the PLY decoder +* Bug fixes + +### Previous releases +https://github.com/google/draco/releases + +Description +=========== + +Draco is a library for compressing and decompressing 3D geometric [meshes] and +[point clouds]. It is intended to improve the storage and transmission of 3D +graphics. + +Draco was designed and built for compression efficiency and speed. The code +supports compressing points, connectivity information, texture coordinates, +color information, normals, and any other generic attributes associated with +geometry. With Draco, applications using 3D graphics can be significantly +smaller without compromising visual fidelity. For users, this means apps can +now be downloaded faster, 3D graphics in the browser can load quicker, and VR +and AR scenes can now be transmitted with a fraction of the bandwidth and +rendered quickly. + +Draco is released as C++ source code that can be used to compress 3D graphics +as well as C++ and Javascript decoders for the encoded data. 
+ + +_**Contents**_ + + * [Building](#building) + * [Usage](#usage) + * [Unity](#unity) + * [WASM and JavaScript Decoders](#WASM-and-JavaScript-Decoders) + * [Command Line Applications](#command-line-applications) + * [Encoding Tool](#encoding-tool) + * [Encoding Point Clouds](#encoding-point-clouds) + * [Decoding Tool](#decoding-tool) + * [C++ Decoder API](#c-decoder-api) + * [Javascript Encoder API](#javascript-encoder-api) + * [Javascript Decoder API](#javascript-decoder-api) + * [Javascript Decoder Performance](#javascript-decoder-performance) + * [Metadata API](#metadata-api) + * [NPM Package](#npm-package) + * [three.js Renderer Example](#threejs-renderer-example) + * [Support](#support) + * [License](#license) + * [References](#references) + + +Building +======== +See [BUILDING](BUILDING.md) for building instructions. + + +Usage +====== + +Unity +----- +For the best information about using Unity with Draco please visit https://github.com/atteneder/DracoUnity + +For a simple example of using Unity with Draco see [README](unity/README.md) in the unity folder. + +WASM and JavaScript Decoders +---------------------------- + +It is recommended to always pull your Draco WASM and JavaScript decoders from: + +~~~~~ bash +https://www.gstatic.com/draco/v1/decoders/ +~~~~~ + +Users will benefit from having the Draco decoder in cache as more sites start using the static URL. + +Command Line Applications +------------------------ + +The default target created from the build files will be the `draco_encoder` +and `draco_decoder` command line applications. For both applications, if you +run them without any arguments or `-h`, the applications will output usage and +options. + +Encoding Tool +------------- + +`draco_encoder` will read OBJ or PLY files as input, and output Draco-encoded +files. We have included Stanford's [Bunny] mesh for testing. The basic command +line looks like this: + +~~~~~ bash +./draco_encoder -i testdata/bun_zipper.ply -o out.drc +~~~~~ + +A value of `0` for the quantization parameter will not perform any quantization +on the specified attribute. Any value other than `0` will quantize the input +values for the specified attribute to that number of bits. For example: + +~~~~~ bash +./draco_encoder -i testdata/bun_zipper.ply -o out.drc -qp 14 +~~~~~ + +will quantize the positions to 14 bits (default is 11 for the position +coordinates). + +In general, the more you quantize your attributes the better compression rate +you will get. It is up to your project to decide how much deviation it will +tolerate. In general, most projects can set quantization values of about `11` +without any noticeable difference in quality. + +The compression level (`-cl`) parameter turns on/off different compression +features. + +~~~~~ bash +./draco_encoder -i testdata/bun_zipper.ply -o out.drc -cl 8 +~~~~~ + +In general, the highest setting, `10`, will have the most compression but +worst decompression speed. `0` will have the least compression, but best +decompression speed. The default setting is `7`. + +Encoding Point Clouds +--------------------- + +You can encode point cloud data with `draco_encoder` by specifying the +`-point_cloud` parameter. If you specify the `-point_cloud` parameter with a +mesh input file, `draco_encoder` will ignore the connectivity data and encode +the positions from the mesh file. 
+ +~~~~~ bash +./draco_encoder -point_cloud -i testdata/bun_zipper.ply -o out.drc +~~~~~ + +This command line will encode the mesh input as a point cloud, even though the +input might not produce compression that is representative of other point +clouds. Specifically, one can expect much better compression rates for larger +and denser point clouds. + +Decoding Tool +------------- + +`draco_decoder` will read Draco files as input, and output OBJ or PLY files. +The basic command line looks like this: + +~~~~~ bash +./draco_decoder -i in.drc -o out.obj +~~~~~ + +C++ Decoder API +------------- + +If you'd like to add decoding to your applications you will need to include +the `draco_dec` library. In order to use the Draco decoder you need to +initialize a `DecoderBuffer` with the compressed data. Then call +`DecodeMeshFromBuffer()` to return a decoded mesh object or call +`DecodePointCloudFromBuffer()` to return a decoded `PointCloud` object. For +example: + +~~~~~ cpp +draco::DecoderBuffer buffer; +buffer.Init(data.data(), data.size()); + +const draco::EncodedGeometryType geom_type = + draco::GetEncodedGeometryType(&buffer); +if (geom_type == draco::TRIANGULAR_MESH) { + unique_ptr mesh = draco::DecodeMeshFromBuffer(&buffer); +} else if (geom_type == draco::POINT_CLOUD) { + unique_ptr pc = draco::DecodePointCloudFromBuffer(&buffer); +} +~~~~~ + +Please see [src/draco/mesh/mesh.h](src/draco/mesh/mesh.h) for the full `Mesh` class interface and +[src/draco/point_cloud/point_cloud.h](src/draco/point_cloud/point_cloud.h) for the full `PointCloud` class interface. + + +Javascript Encoder API +---------------------- +The Javascript encoder is located in `javascript/draco_encoder.js`. The encoder +API can be used to compress mesh and point cloud. In order to use the encoder, +you need to first create an instance of `DracoEncoderModule`. Then use this +instance to create `MeshBuilder` and `Encoder` objects. `MeshBuilder` is used +to construct a mesh from geometry data that could be later compressed by +`Encoder`. First create a mesh object using `new encoderModule.Mesh()` . Then, +use `AddFacesToMesh()` to add indices to the mesh and use +`AddFloatAttributeToMesh()` to add attribute data to the mesh, e.g. position, +normal, color and texture coordinates. After a mesh is constructed, you could +then use `EncodeMeshToDracoBuffer()` to compress the mesh. 
For example: + +~~~~~ js +const mesh = { + indices : new Uint32Array(indices), + vertices : new Float32Array(vertices), + normals : new Float32Array(normals) +}; + +const encoderModule = DracoEncoderModule(); +const encoder = new encoderModule.Encoder(); +const meshBuilder = new encoderModule.MeshBuilder(); +const dracoMesh = new encoderModule.Mesh(); + +const numFaces = mesh.indices.length / 3; +const numPoints = mesh.vertices.length; +meshBuilder.AddFacesToMesh(dracoMesh, numFaces, mesh.indices); + +meshBuilder.AddFloatAttributeToMesh(dracoMesh, encoderModule.POSITION, + numPoints, 3, mesh.vertices); +if (mesh.hasOwnProperty('normals')) { + meshBuilder.AddFloatAttributeToMesh( + dracoMesh, encoderModule.NORMAL, numPoints, 3, mesh.normals); +} +if (mesh.hasOwnProperty('colors')) { + meshBuilder.AddFloatAttributeToMesh( + dracoMesh, encoderModule.COLOR, numPoints, 3, mesh.colors); +} +if (mesh.hasOwnProperty('texcoords')) { + meshBuilder.AddFloatAttributeToMesh( + dracoMesh, encoderModule.TEX_COORD, numPoints, 3, mesh.texcoords); +} + +if (method === "edgebreaker") { + encoder.SetEncodingMethod(encoderModule.MESH_EDGEBREAKER_ENCODING); +} else if (method === "sequential") { + encoder.SetEncodingMethod(encoderModule.MESH_SEQUENTIAL_ENCODING); +} + +const encodedData = new encoderModule.DracoInt8Array(); +// Use default encoding setting. +const encodedLen = encoder.EncodeMeshToDracoBuffer(dracoMesh, + encodedData); +encoderModule.destroy(dracoMesh); +encoderModule.destroy(encoder); +encoderModule.destroy(meshBuilder); + +~~~~~ +Please see [src/draco/javascript/emscripten/draco_web_encoder.idl](src/draco/javascript/emscripten/draco_web_encoder.idl) for the full API. + +Javascript Decoder API +---------------------- + +The Javascript decoder is located in [javascript/draco_decoder.js](javascript/draco_decoder.js). The +Javascript decoder can decode mesh and point cloud. In order to use the +decoder, you must first create an instance of `DracoDecoderModule`. The +instance is then used to create `DecoderBuffer` and `Decoder` objects. Set +the encoded data in the `DecoderBuffer`. Then call `GetEncodedGeometryType()` +to identify the type of geometry, e.g. mesh or point cloud. Then call either +`DecodeBufferToMesh()` or `DecodeBufferToPointCloud()`, which will return +a Mesh object or a point cloud. For example: + +~~~~~ js +// Create the Draco decoder. +const decoderModule = DracoDecoderModule(); +const buffer = new decoderModule.DecoderBuffer(); +buffer.Init(byteArray, byteArray.length); + +// Create a buffer to hold the encoded data. +const decoder = new decoderModule.Decoder(); +const geometryType = decoder.GetEncodedGeometryType(buffer); + +// Decode the encoded geometry. +let outputGeometry; +let status; +if (geometryType == decoderModule.TRIANGULAR_MESH) { + outputGeometry = new decoderModule.Mesh(); + status = decoder.DecodeBufferToMesh(buffer, outputGeometry); +} else { + outputGeometry = new decoderModule.PointCloud(); + status = decoder.DecodeBufferToPointCloud(buffer, outputGeometry); +} + +// You must explicitly delete objects created from the DracoDecoderModule +// or Decoder. +decoderModule.destroy(outputGeometry); +decoderModule.destroy(decoder); +decoderModule.destroy(buffer); +~~~~~ + +Please see [src/draco/javascript/emscripten/draco_web_decoder.idl](src/draco/javascript/emscripten/draco_web_decoder.idl) for the full API. + +Javascript Decoder Performance +------------------------------ + +The Javascript decoder is built with dynamic memory. 
This will let the decoder +work with all of the compressed data. But this option is not the fastest. +Pre-allocating the memory sees about a 2x decoder speed improvement. If you +know all of your project's memory requirements, you can turn on static memory +by changing `CMakeLists.txt` accordingly. + +Metadata API +------------ +Starting from v1.0, Draco provides metadata functionality for encoding data +other than geometry. It could be used to encode any custom data along with the +geometry. For example, we can enable metadata functionality to encode the name +of attributes, name of sub-objects and customized information. +For one mesh and point cloud, it can have one top-level geometry metadata class. +The top-level metadata then can have hierarchical metadata. Other than that, +the top-level metadata can have metadata for each attribute which is called +attribute metadata. The attribute metadata should be initialized with the +correspondent attribute id within the mesh. The metadata API is provided both +in C++ and Javascript. +For example, to add metadata in C++: + +~~~~~ cpp +draco::PointCloud pc; +// Add metadata for the geometry. +std::unique_ptr metadata = + std::unique_ptr(new draco::GeometryMetadata()); +metadata->AddEntryString("description", "This is an example."); +pc.AddMetadata(std::move(metadata)); + +// Add metadata for attributes. +draco::GeometryAttribute pos_att; +pos_att.Init(draco::GeometryAttribute::POSITION, nullptr, 3, + draco::DT_FLOAT32, false, 12, 0); +const uint32_t pos_att_id = pc.AddAttribute(pos_att, false, 0); + +std::unique_ptr pos_metadata = + std::unique_ptr( + new draco::AttributeMetadata(pos_att_id)); +pos_metadata->AddEntryString("name", "position"); + +// Directly add attribute metadata to geometry. +// You can do this without explicitly add |GeometryMetadata| to mesh. +pc.AddAttributeMetadata(pos_att_id, std::move(pos_metadata)); +~~~~~ + +To read metadata from a geometry in C++: + +~~~~~ cpp +// Get metadata for the geometry. +const draco::GeometryMetadata *pc_metadata = pc.GetMetadata(); + +// Request metadata for a specific attribute. +const draco::AttributeMetadata *requested_pos_metadata = + pc.GetAttributeMetadataByStringEntry("name", "position"); +~~~~~ + +Please see [src/draco/metadata](src/draco/metadata) and [src/draco/point_cloud](src/draco/point_cloud) for the full API. + +NPM Package +----------- +Draco NPM NodeJS package is located in [javascript/npm/draco3d](javascript/npm/draco3d). Please see the +doc in the folder for detailed usage. + +three.js Renderer Example +------------------------- + +Here's an [example] of a geometric compressed with Draco loaded via a +Javascript decoder using the `three.js` renderer. + +Please see the [javascript/example/README.md](javascript/example/README.md) file for more information. + +Support +======= + +For questions/comments please email + +If you have found an error in this library, please file an issue at + + +Patches are encouraged, and may be submitted by forking this project and +submitting a pull request through GitHub. See [CONTRIBUTING] for more detail. + +License +======= +Licensed under the Apache License, Version 2.0 (the "License"); you may not +use this file except in compliance with the License. You may obtain a copy of +the License at + + + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +License for the specific language governing permissions and limitations under +the License. + +References +========== +[example]:https://storage.googleapis.com/demos.webmproject.org/draco/draco_loader_throw.html +[meshes]: https://en.wikipedia.org/wiki/Polygon_mesh +[point clouds]: https://en.wikipedia.org/wiki/Point_cloud +[Bunny]: https://graphics.stanford.edu/data/3Dscanrep/ +[CONTRIBUTING]: https://raw.githubusercontent.com/google/draco/master/CONTRIBUTING.md + +Bunny model from Stanford's graphic department diff --git a/contrib/draco/cmake/DracoConfig.cmake b/contrib/draco/cmake/DracoConfig.cmake new file mode 100644 index 000000000..be5e1faef --- /dev/null +++ b/contrib/draco/cmake/DracoConfig.cmake @@ -0,0 +1,3 @@ +@PACKAGE_INIT@ +set_and_check(draco_INCLUDE_DIR "@PACKAGE_draco_include_install_dir@") +set_and_check(draco_LIBRARY_DIR "@PACKAGE_draco_lib_install_dir@") diff --git a/contrib/draco/cmake/FindDraco.cmake b/contrib/draco/cmake/FindDraco.cmake new file mode 100644 index 000000000..0a9193065 --- /dev/null +++ b/contrib/draco/cmake/FindDraco.cmake @@ -0,0 +1,56 @@ +# Finddraco +# +# Locates draco and sets the following variables: +# +# draco_FOUND draco_INCLUDE_DIRS draco_LIBARY_DIRS draco_LIBRARIES +# draco_VERSION_STRING +# +# draco_FOUND is set to YES only when all other variables are successfully +# configured. + +unset(draco_FOUND) +unset(draco_INCLUDE_DIRS) +unset(draco_LIBRARY_DIRS) +unset(draco_LIBRARIES) +unset(draco_VERSION_STRING) + +mark_as_advanced(draco_FOUND) +mark_as_advanced(draco_INCLUDE_DIRS) +mark_as_advanced(draco_LIBRARY_DIRS) +mark_as_advanced(draco_LIBRARIES) +mark_as_advanced(draco_VERSION_STRING) + +set(draco_version_file_no_prefix "draco/src/draco/core/draco_version.h") + +# Set draco_INCLUDE_DIRS +find_path(draco_INCLUDE_DIRS NAMES "${draco_version_file_no_prefix}") + +# Extract the version string from draco_version.h. +if(draco_INCLUDE_DIRS) + set(draco_version_file + "${draco_INCLUDE_DIRS}/draco/src/draco/core/draco_version.h") + file(STRINGS "${draco_version_file}" draco_version REGEX "kdracoVersion") + list(GET draco_version 0 draco_version) + string(REPLACE "static const char kdracoVersion[] = " "" draco_version + "${draco_version}") + string(REPLACE ";" "" draco_version "${draco_version}") + string(REPLACE "\"" "" draco_version "${draco_version}") + set(draco_VERSION_STRING ${draco_version}) +endif() + +# Find the library. +if(BUILD_SHARED_LIBS) + find_library(draco_LIBRARIES NAMES draco.dll libdraco.dylib libdraco.so) +else() + find_library(draco_LIBRARIES NAMES draco.lib libdraco.a) +endif() + +# Store path to library. +get_filename_component(draco_LIBRARY_DIRS ${draco_LIBRARIES} DIRECTORY) + +if(draco_INCLUDE_DIRS + AND draco_LIBRARY_DIRS + AND draco_LIBRARIES + AND draco_VERSION_STRING) + set(draco_FOUND YES) +endif() diff --git a/contrib/draco/cmake/compiler_flags.cmake b/contrib/draco/cmake/compiler_flags.cmake new file mode 100644 index 000000000..8750e6f7d --- /dev/null +++ b/contrib/draco/cmake/compiler_flags.cmake @@ -0,0 +1,220 @@ +if(DRACO_CMAKE_COMPILER_FLAGS_CMAKE_) + return() +endif() +set(DRACO_CMAKE_COMPILER_FLAGS_CMAKE_ 1) + +include(CheckCCompilerFlag) +include(CheckCXXCompilerFlag) +include("${draco_root}/cmake/compiler_tests.cmake") + +# Strings used to cache failed C/CXX flags. +set(DRACO_FAILED_C_FLAGS) +set(DRACO_FAILED_CXX_FLAGS) + +# Checks C compiler for support of $c_flag. Adds $c_flag to $CMAKE_C_FLAGS when +# the compile test passes. Caches $c_flag in $DRACO_FAILED_C_FLAGS when the test +# fails. 
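+# Example of how this macro might be used (the flag below is illustrative
+# only, not a flag this build necessarily adds):
+#
+#   add_c_flag_if_supported("-Wshadow")
+#
+# On success the flag is appended to CMAKE_C_FLAGS; on failure it is cached in
+# DRACO_FAILED_C_FLAGS so the compile test is not repeated.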
+macro(add_c_flag_if_supported c_flag) + unset(C_FLAG_FOUND CACHE) + string(FIND "${CMAKE_C_FLAGS}" "${c_flag}" C_FLAG_FOUND) + unset(C_FLAG_FAILED CACHE) + string(FIND "${DRACO_FAILED_C_FLAGS}" "${c_flag}" C_FLAG_FAILED) + + if(${C_FLAG_FOUND} EQUAL -1 AND ${C_FLAG_FAILED} EQUAL -1) + unset(C_FLAG_SUPPORTED CACHE) + message("Checking C compiler flag support for: " ${c_flag}) + check_c_compiler_flag("${c_flag}" C_FLAG_SUPPORTED) + if(${C_FLAG_SUPPORTED}) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${c_flag}" CACHE STRING "") + else() + set(DRACO_FAILED_C_FLAGS + "${DRACO_FAILED_C_FLAGS} ${c_flag}" + CACHE STRING "" FORCE) + endif() + endif() +endmacro() + +# Checks C++ compiler for support of $cxx_flag. Adds $cxx_flag to +# $CMAKE_CXX_FLAGS when the compile test passes. Caches $c_flag in +# $DRACO_FAILED_CXX_FLAGS when the test fails. +macro(add_cxx_flag_if_supported cxx_flag) + unset(CXX_FLAG_FOUND CACHE) + string(FIND "${CMAKE_CXX_FLAGS}" "${cxx_flag}" CXX_FLAG_FOUND) + unset(CXX_FLAG_FAILED CACHE) + string(FIND "${DRACO_FAILED_CXX_FLAGS}" "${cxx_flag}" CXX_FLAG_FAILED) + + if(${CXX_FLAG_FOUND} EQUAL -1 AND ${CXX_FLAG_FAILED} EQUAL -1) + unset(CXX_FLAG_SUPPORTED CACHE) + message("Checking CXX compiler flag support for: " ${cxx_flag}) + check_cxx_compiler_flag("${cxx_flag}" CXX_FLAG_SUPPORTED) + if(${CXX_FLAG_SUPPORTED}) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${cxx_flag}" CACHE STRING "") + else() + set(DRACO_FAILED_CXX_FLAGS + "${DRACO_FAILED_CXX_FLAGS} ${cxx_flag}" + CACHE STRING "" FORCE) + endif() + endif() +endmacro() + +# Convenience method for adding a flag to both the C and C++ compiler command +# lines. +macro(add_compiler_flag_if_supported flag) + add_c_flag_if_supported(${flag}) + add_cxx_flag_if_supported(${flag}) +endmacro() + +# Checks C compiler for support of $c_flag and terminates generation when +# support is not present. +macro(require_c_flag c_flag update_c_flags) + unset(C_FLAG_FOUND CACHE) + string(FIND "${CMAKE_C_FLAGS}" "${c_flag}" C_FLAG_FOUND) + + if(${C_FLAG_FOUND} EQUAL -1) + unset(HAVE_C_FLAG CACHE) + message("Checking C compiler flag support for: " ${c_flag}) + check_c_compiler_flag("${c_flag}" HAVE_C_FLAG) + if(NOT ${HAVE_C_FLAG}) + message( + FATAL_ERROR "${PROJECT_NAME} requires support for C flag: ${c_flag}.") + endif() + if(${update_c_flags}) + set(CMAKE_C_FLAGS "${c_flag} ${CMAKE_C_FLAGS}" CACHE STRING "" FORCE) + endif() + endif() +endmacro() + +# Checks CXX compiler for support of $cxx_flag and terminates generation when +# support is not present. +macro(require_cxx_flag cxx_flag update_cxx_flags) + unset(CXX_FLAG_FOUND CACHE) + string(FIND "${CMAKE_CXX_FLAGS}" "${cxx_flag}" CXX_FLAG_FOUND) + + if(${CXX_FLAG_FOUND} EQUAL -1) + unset(HAVE_CXX_FLAG CACHE) + message("Checking CXX compiler flag support for: " ${cxx_flag}) + check_cxx_compiler_flag("${cxx_flag}" HAVE_CXX_FLAG) + if(NOT ${HAVE_CXX_FLAG}) + message( + FATAL_ERROR + "${PROJECT_NAME} requires support for CXX flag: ${cxx_flag}.") + endif() + if(${update_cxx_flags}) + set(CMAKE_CXX_FLAGS + "${cxx_flag} ${CMAKE_CXX_FLAGS}" + CACHE STRING "" FORCE) + endif() + endif() +endmacro() + +# Checks for support of $flag by both the C and CXX compilers. Terminates +# generation when support is not present in both compilers. +macro(require_compiler_flag flag update_cmake_flags) + require_c_flag(${flag} ${update_cmake_flags}) + require_cxx_flag(${flag} ${update_cmake_flags}) +endmacro() + +# Checks only non-MSVC targets for support of $c_flag and terminates generation +# when support is not present. 
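+# Example of how this macro might be used (the flag and update argument below
+# are illustrative only):
+#
+#   require_c_flag_nomsvc("-std=c99" YES)
+#
+# On non-MSVC toolchains this stops generation with a fatal error when the
+# flag is unsupported; passing YES prepends the flag to CMAKE_C_FLAGS.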
+macro(require_c_flag_nomsvc c_flag update_c_flags) + if(NOT MSVC) + require_c_flag(${c_flag} ${update_c_flags}) + endif() +endmacro() + +# Checks only non-MSVC targets for support of $cxx_flag and terminates +# generation when support is not present. +macro(require_cxx_flag_nomsvc cxx_flag update_cxx_flags) + if(NOT MSVC) + require_cxx_flag(${cxx_flag} ${update_cxx_flags}) + endif() +endmacro() + +# Checks only non-MSVC targets for support of $flag by both the C and CXX +# compilers. Terminates generation when support is not present in both +# compilers. +macro(require_compiler_flag_nomsvc flag update_cmake_flags) + require_c_flag_nomsvc(${flag} ${update_cmake_flags}) + require_cxx_flag_nomsvc(${flag} ${update_cmake_flags}) +endmacro() + +# Adds $flag to assembler command line. +macro(append_as_flag flag) + unset(AS_FLAG_FOUND CACHE) + string(FIND "${DRACO_AS_FLAGS}" "${flag}" AS_FLAG_FOUND) + + if(${AS_FLAG_FOUND} EQUAL -1) + set(DRACO_AS_FLAGS "${DRACO_AS_FLAGS} ${flag}") + endif() +endmacro() + +# Adds $flag to the C compiler command line. +macro(append_c_flag flag) + unset(C_FLAG_FOUND CACHE) + string(FIND "${CMAKE_C_FLAGS}" "${flag}" C_FLAG_FOUND) + + if(${C_FLAG_FOUND} EQUAL -1) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${flag}") + endif() +endmacro() + +# Adds $flag to the CXX compiler command line. +macro(append_cxx_flag flag) + unset(CXX_FLAG_FOUND CACHE) + string(FIND "${CMAKE_CXX_FLAGS}" "${flag}" CXX_FLAG_FOUND) + + if(${CXX_FLAG_FOUND} EQUAL -1) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${flag}") + endif() +endmacro() + +# Adds $flag to the C and CXX compiler command lines. +macro(append_compiler_flag flag) + append_c_flag(${flag}) + append_cxx_flag(${flag}) +endmacro() + +# Adds $flag to the executable linker command line. +macro(append_exe_linker_flag flag) + unset(LINKER_FLAG_FOUND CACHE) + string(FIND "${CMAKE_EXE_LINKER_FLAGS}" "${flag}" LINKER_FLAG_FOUND) + + if(${LINKER_FLAG_FOUND} EQUAL -1) + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${flag}") + endif() +endmacro() + +# Adds $flag to the link flags for $target. +function(append_link_flag_to_target target flags) + unset(target_link_flags) + get_target_property(target_link_flags ${target} LINK_FLAGS) + + if(target_link_flags) + unset(link_flag_found) + string(FIND "${target_link_flags}" "${flags}" link_flag_found) + + if(NOT ${link_flag_found} EQUAL -1) + return() + endif() + + set(target_link_flags "${target_link_flags} ${flags}") + else() + set(target_link_flags "${flags}") + endif() + + set_target_properties(${target} PROPERTIES LINK_FLAGS ${target_link_flags}) +endfunction() + +# Adds $flag to executable linker flags, and makes sure C/CXX builds still work. +macro(require_linker_flag flag) + append_exe_linker_flag(${flag}) + + unset(c_passed) + draco_check_c_compiles("LINKER_FLAG_C_TEST(${flag})" "" c_passed) + unset(cxx_passed) + draco_check_cxx_compiles("LINKER_FLAG_CXX_TEST(${flag})" "" cxx_passed) + + if(NOT c_passed OR NOT cxx_passed) + message(FATAL_ERROR "Linker flag test for ${flag} failed.") + endif() +endmacro() diff --git a/contrib/draco/cmake/compiler_tests.cmake b/contrib/draco/cmake/compiler_tests.cmake new file mode 100644 index 000000000..e781a6537 --- /dev/null +++ b/contrib/draco/cmake/compiler_tests.cmake @@ -0,0 +1,103 @@ +if(DRACO_CMAKE_COMPILER_TESTS_CMAKE_) + return() +endif() +set(DRACO_CMAKE_COMPILER_TESTS_CMAKE_ 1) + +include(CheckCSourceCompiles) +include(CheckCXXSourceCompiles) + +# The basic main() macro used in all compile tests. 
+set(DRACO_C_MAIN "\nint main(void) { return 0; }") +set(DRACO_CXX_MAIN "\nint main() { return 0; }") + +# Strings containing the names of passed and failed tests. +set(DRACO_C_PASSED_TESTS) +set(DRACO_C_FAILED_TESTS) +set(DRACO_CXX_PASSED_TESTS) +set(DRACO_CXX_FAILED_TESTS) + +macro(draco_push_var var new_value) + set(SAVED_${var} ${var}) + set(${var} ${new_value}) +endmacro() + +macro(draco_pop_var var) + set(var ${SAVED_${var}}) + unset(SAVED_${var}) +endmacro() + +# Confirms $test_source compiles and stores $test_name in one of +# $DRACO_C_PASSED_TESTS or $DRACO_C_FAILED_TESTS depending on out come. When the +# test passes $result_var is set to 1. When it fails $result_var is unset. The +# test is not run if the test name is found in either of the passed or failed +# test variables. +macro(draco_check_c_compiles test_name test_source result_var) + unset(C_TEST_PASSED CACHE) + unset(C_TEST_FAILED CACHE) + string(FIND "${DRACO_C_PASSED_TESTS}" "${test_name}" C_TEST_PASSED) + string(FIND "${DRACO_C_FAILED_TESTS}" "${test_name}" C_TEST_FAILED) + if(${C_TEST_PASSED} EQUAL -1 AND ${C_TEST_FAILED} EQUAL -1) + unset(C_TEST_COMPILED CACHE) + message("Running C compiler test: ${test_name}") + check_c_source_compiles("${test_source} ${DRACO_C_MAIN}" C_TEST_COMPILED) + set(${result_var} ${C_TEST_COMPILED}) + + if(${C_TEST_COMPILED}) + set(DRACO_C_PASSED_TESTS "${DRACO_C_PASSED_TESTS} ${test_name}") + else() + set(DRACO_C_FAILED_TESTS "${DRACO_C_FAILED_TESTS} ${test_name}") + message("C Compiler test ${test_name} failed.") + endif() + elseif(NOT ${C_TEST_PASSED} EQUAL -1) + set(${result_var} 1) + else() # ${C_TEST_FAILED} NOT EQUAL -1 + unset(${result_var}) + endif() +endmacro() + +# Confirms $test_source compiles and stores $test_name in one of +# $DRACO_CXX_PASSED_TESTS or $DRACO_CXX_FAILED_TESTS depending on out come. When +# the test passes $result_var is set to 1. When it fails $result_var is unset. +# The test is not run if the test name is found in either of the passed or +# failed test variables. +macro(draco_check_cxx_compiles test_name test_source result_var) + unset(CXX_TEST_PASSED CACHE) + unset(CXX_TEST_FAILED CACHE) + string(FIND "${DRACO_CXX_PASSED_TESTS}" "${test_name}" CXX_TEST_PASSED) + string(FIND "${DRACO_CXX_FAILED_TESTS}" "${test_name}" CXX_TEST_FAILED) + if(${CXX_TEST_PASSED} EQUAL -1 AND ${CXX_TEST_FAILED} EQUAL -1) + unset(CXX_TEST_COMPILED CACHE) + message("Running CXX compiler test: ${test_name}") + check_cxx_source_compiles("${test_source} ${DRACO_CXX_MAIN}" + CXX_TEST_COMPILED) + set(${result_var} ${CXX_TEST_COMPILED}) + + if(${CXX_TEST_COMPILED}) + set(DRACO_CXX_PASSED_TESTS "${DRACO_CXX_PASSED_TESTS} ${test_name}") + else() + set(DRACO_CXX_FAILED_TESTS "${DRACO_CXX_FAILED_TESTS} ${test_name}") + message("CXX Compiler test ${test_name} failed.") + endif() + elseif(NOT ${CXX_TEST_PASSED} EQUAL -1) + set(${result_var} 1) + else() # ${CXX_TEST_FAILED} NOT EQUAL -1 + unset(${result_var}) + endif() +endmacro() + +# Convenience macro that confirms $test_source compiles as C and C++. +# $result_var is set to 1 when both tests are successful, and 0 when one or both +# tests fail. Note: This macro is intended to be used to write to result +# variables that are expanded via configure_file(). $result_var is set to 1 or 0 +# to allow direct usage of the value in generated source files. 
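+# Example of how this macro might be used (test name, snippet, and result
+# variable are illustrative only):
+#
+#   draco_check_source_compiles("pthread_check" "#include <pthread.h>"
+#                               HAVE_PTHREAD)
+#
+# HAVE_PTHREAD ends up as 1 only when the snippet compiles as both C and C++,
+# so the value can be substituted into generated headers via configure_file().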
+macro(draco_check_source_compiles test_name test_source result_var) + unset(C_PASSED) + unset(CXX_PASSED) + draco_check_c_compiles(${test_name} ${test_source} C_PASSED) + draco_check_cxx_compiles(${test_name} ${test_source} CXX_PASSED) + if(${C_PASSED} AND ${CXX_PASSED}) + set(${result_var} 1) + else() + set(${result_var} 0) + endif() +endmacro() diff --git a/contrib/draco/cmake/draco-config.cmake.template b/contrib/draco/cmake/draco-config.cmake.template new file mode 100644 index 000000000..ca4a456bf --- /dev/null +++ b/contrib/draco/cmake/draco-config.cmake.template @@ -0,0 +1,2 @@ +set(DRACO_INCLUDE_DIRS "@DRACO_INCLUDE_DIRS@") +set(DRACO_LIBRARIES "draco") diff --git a/contrib/draco/cmake/draco.pc.template b/contrib/draco/cmake/draco.pc.template new file mode 100644 index 000000000..b8ae48212 --- /dev/null +++ b/contrib/draco/cmake/draco.pc.template @@ -0,0 +1,11 @@ +prefix=@prefix@ +exec_prefix=@exec_prefix@ +libdir=@libdir@ +includedir=@includedir@ + +Name: @PROJECT_NAME@ +Description: Draco geometry de(com)pression library. +Version: @DRACO_VERSION@ +Cflags: -I${includedir} +Libs: -L${libdir} -ldraco +Libs.private: @CMAKE_THREAD_LIBS_INIT@ diff --git a/contrib/draco/cmake/draco_build_definitions.cmake b/contrib/draco/cmake/draco_build_definitions.cmake new file mode 100644 index 000000000..c1ada6206 --- /dev/null +++ b/contrib/draco/cmake/draco_build_definitions.cmake @@ -0,0 +1,117 @@ +if(DRACO_CMAKE_DRACO_BUILD_DEFINITIONS_CMAKE_) + return() +endif() # DRACO_CMAKE_DRACO_BUILD_DEFINITIONS_CMAKE_ +set(DRACO_CMAKE_DRACO_BUILD_DEFINITIONS_CMAKE_ 1) + +# Utility for controlling the main draco library dependency. This changes in +# shared builds, and when an optional target requires a shared library build. +macro(set_draco_target) + if(MSVC OR WIN32) + set(draco_dependency draco) + set(draco_plugin_dependency ${draco_dependency}) + else() + if(BUILD_SHARED_LIBS) + set(draco_dependency draco_shared) + else() + set(draco_dependency draco_static) + endif() + set(draco_plugin_dependency draco_static) + endif() + + if(BUILD_SHARED_LIBS) + set(CMAKE_POSITION_INDEPENDENT_CODE ON) + endif() +endmacro() + +# Configures flags and sets build system globals. 
+macro(draco_set_build_definitions) + string(TOLOWER "${CMAKE_BUILD_TYPE}" build_type_lowercase) + + if(build_type_lowercase MATCHES "rel" AND DRACO_FAST) + if(MSVC) + list(APPEND draco_msvc_cxx_flags "/Ox") + else() + list(APPEND draco_base_cxx_flags "-O3") + endif() + endif() + + draco_load_version_info() + set(DRACO_SOVERSION 1) + + list(APPEND draco_include_paths "${draco_root}" "${draco_root}/src" + "${draco_build}") + + if(DRACO_ABSL) + list(APPEND draco_include_path "${draco_root}/third_party/abseil-cpp") + endif() + + + list(APPEND draco_gtest_include_paths + "${draco_root}/../googletest/googlemock/include" + "${draco_root}/../googletest/googlemock" + "${draco_root}/../googletest/googletest/include" + "${draco_root}/../googletest/googletest") + list(APPEND draco_test_include_paths ${draco_include_paths} + ${draco_gtest_include_paths}) + list(APPEND draco_defines "DRACO_CMAKE=1" + "DRACO_FLAGS_SRCDIR=\"${draco_root}\"" + "DRACO_FLAGS_TMPDIR=\"/tmp\"") + + if(MSVC OR WIN32) + list(APPEND draco_defines "_CRT_SECURE_NO_DEPRECATE=1" "NOMINMAX=1") + + if(BUILD_SHARED_LIBS) + set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS TRUE) + endif() + endif() + + if(ANDROID) + if(CMAKE_ANDROID_ARCH_ABI STREQUAL "armeabi-v7a") + set(CMAKE_ANDROID_ARM_MODE ON) + endif() + endif() + + set_draco_target() + + if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang") + if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "6") + # Quiet warnings in copy-list-initialization where {} elision has always + # been allowed. + list(APPEND draco_clang_cxx_flags "-Wno-missing-braces") + endif() + endif() + + if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") + if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL "7") + if(CMAKE_SYSTEM_PROCESSOR STREQUAL "armv7") + # Quiet gcc 6 vs 7 abi warnings: + # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=77728 + list(APPEND draco_base_cxx_flags "-Wno-psabi") + list(APPEND ABSL_GCC_FLAGS "-Wno-psabi") + endif() + endif() + endif() + + # Source file names ending in these suffixes will have the appropriate + # compiler flags added to their compile commands to enable intrinsics. + set(draco_neon_source_file_suffix "neon.cc") + set(draco_sse4_source_file_suffix "sse4.cc") + + if((${CMAKE_CXX_COMPILER_ID} + STREQUAL + "GNU" + AND ${CMAKE_CXX_COMPILER_VERSION} VERSION_LESS 5) + OR (${CMAKE_CXX_COMPILER_ID} + STREQUAL + "Clang" + AND ${CMAKE_CXX_COMPILER_VERSION} VERSION_LESS 4)) + message( + WARNING "GNU/GCC < v5 or Clang/LLVM < v4, ENABLING COMPATIBILITY MODE.") + draco_enable_feature(FEATURE "DRACO_OLD_GCC") + endif() + + if(EMSCRIPTEN) + draco_check_emscripten_environment() + draco_get_required_emscripten_flags(FLAG_LIST_VAR draco_base_cxx_flags) + endif() +endmacro() diff --git a/contrib/draco/cmake/draco_cpu_detection.cmake b/contrib/draco/cmake/draco_cpu_detection.cmake new file mode 100644 index 000000000..96e4a289b --- /dev/null +++ b/contrib/draco/cmake/draco_cpu_detection.cmake @@ -0,0 +1,28 @@ +if(DRACO_CMAKE_DRACO_CPU_DETECTION_CMAKE_) + return() +endif() # DRACO_CMAKE_DRACO_CPU_DETECTION_CMAKE_ +set(DRACO_CMAKE_DRACO_CPU_DETECTION_CMAKE_ 1) + +# Detect optimizations available for the current target CPU. 
+macro(draco_optimization_detect) + if(DRACO_ENABLE_OPTIMIZATIONS) + string(TOLOWER "${CMAKE_SYSTEM_PROCESSOR}" cpu_lowercase) + if(cpu_lowercase MATCHES "^arm|^aarch64") + set(draco_have_neon ON) + elseif(cpu_lowercase MATCHES "^x86|amd64") + set(draco_have_sse4 ON) + endif() + endif() + + if(draco_have_neon AND DRACO_ENABLE_NEON) + list(APPEND draco_defines "DRACO_ENABLE_NEON=1") + else() + list(APPEND draco_defines "DRACO_ENABLE_NEON=0") + endif() + + if(draco_have_sse4 AND DRACO_ENABLE_SSE4_1) + list(APPEND draco_defines "DRACO_ENABLE_SSE4_1=1") + else() + list(APPEND draco_defines "DRACO_ENABLE_SSE4_1=0") + endif() +endmacro() diff --git a/contrib/draco/cmake/draco_emscripten.cmake b/contrib/draco/cmake/draco_emscripten.cmake new file mode 100644 index 000000000..10c935043 --- /dev/null +++ b/contrib/draco/cmake/draco_emscripten.cmake @@ -0,0 +1,185 @@ +if(DRACO_CMAKE_DRACO_EMSCRIPTEN_CMAKE_) + return() +endif() # DRACO_CMAKE_DRACO_EMSCRIPTEN_CMAKE_ + +# Checks environment for Emscripten prerequisites. +macro(draco_check_emscripten_environment) + if(NOT PYTHONINTERP_FOUND) + message( + FATAL_ERROR + "Python required for Emscripten builds, but cmake cannot find it.") + endif() + if(NOT EXISTS "$ENV{EMSCRIPTEN}") + message( + FATAL_ERROR + "The EMSCRIPTEN environment variable must be set. See README.md.") + endif() +endmacro() + +# Obtains the required Emscripten flags for Draco targets. +macro(draco_get_required_emscripten_flags) + set(em_FLAG_LIST_VAR) + set(em_flags) + set(em_single_arg_opts FLAG_LIST_VAR) + set(em_multi_arg_opts) + cmake_parse_arguments(em "${em_flags}" "${em_single_arg_opts}" + "${em_multi_arg_opts}" ${ARGN}) + if(NOT em_FLAG_LIST_VAR) + message(FATAL "draco_get_required_emscripten_flags: FLAG_LIST_VAR required") + endif() + + if(DRACO_JS_GLUE) + unset(required_flags) + list(APPEND ${em_FLAG_LIST_VAR} "-sALLOW_MEMORY_GROWTH=1") + list(APPEND ${em_FLAG_LIST_VAR} "-Wno-almost-asm") + list(APPEND ${em_FLAG_LIST_VAR} "--memory-init-file" "0") + list(APPEND ${em_FLAG_LIST_VAR} "-fno-omit-frame-pointer") + list(APPEND ${em_FLAG_LIST_VAR} "-sMODULARIZE=1") + list(APPEND ${em_FLAG_LIST_VAR} "-sNO_FILESYSTEM=1") + list(APPEND ${em_FLAG_LIST_VAR} "-sEXPORTED_RUNTIME_METHODS=[]") + list(APPEND ${em_FLAG_LIST_VAR} "-sPRECISE_F32=1") + list(APPEND ${em_FLAG_LIST_VAR} "-sNODEJS_CATCH_EXIT=0") + list(APPEND ${em_FLAG_LIST_VAR} "-sNODEJS_CATCH_REJECTION=0") + + if(DRACO_FAST) + list(APPEND ${em_FLAG_LIST_VAR} "--llvm-lto" "1") + endif() + if(DRACO_WASM) + list(APPEND ${em_FLAG_LIST_VAR} "-sWASM=1") + else() + list(APPEND ${em_FLAG_LIST_VAR} "-sWASM=0") + endif() + if(DRACO_IE_COMPATIBLE) + list(APPEND ${em_FLAG_LIST_VAR} "-sLEGACY_VM_SUPPORT=1") + endif() + endif() +endmacro() + +# Macro for generating C++ glue code from IDL for Emscripten targets. Executes +# python to generate the C++ binding, and establishes dendency: $OUTPUT_PATH.cpp +# on $INPUT_IDL. 
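+# Example of how this macro might be used (the IDL and output paths below are
+# illustrative placeholders, not the values the build necessarily passes):
+#
+#   draco_generate_emscripten_glue(
+#     INPUT_IDL "${draco_root}/src/draco/javascript/emscripten/draco_web_decoder.idl"
+#     OUTPUT_PATH "${draco_build}/glue_decoder")
+#
+# This runs webidl_binder.py to produce the C++ binding (glue_decoder.cpp) and
+# its JS counterpart, and registers a custom command so the glue is
+# regenerated when the IDL is edited.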
+macro(draco_generate_emscripten_glue)
+  set(glue_flags)
+  set(glue_single_arg_opts INPUT_IDL OUTPUT_PATH)
+  set(glue_multi_arg_opts)
+  cmake_parse_arguments(glue "${glue_flags}" "${glue_single_arg_opts}"
+                        "${glue_multi_arg_opts}" ${ARGN})
+
+  if(DRACO_VERBOSE GREATER 1)
+    message("--------- draco_generate_emscripten_glue -----------\n"
+            "glue_INPUT_IDL=${glue_INPUT_IDL}\n"
+            "glue_OUTPUT_PATH=${glue_OUTPUT_PATH}\n"
+            "----------------------------------------------------\n")
+  endif()
+
+  if(NOT glue_INPUT_IDL OR NOT glue_OUTPUT_PATH)
+    message(
+      FATAL_ERROR
+        "draco_generate_emscripten_glue: INPUT_IDL and OUTPUT_PATH required.")
+  endif()
+
+  # Generate the glue source.
+  execute_process(COMMAND ${PYTHON_EXECUTABLE}
+                          $ENV{EMSCRIPTEN}/tools/webidl_binder.py
+                          ${glue_INPUT_IDL} ${glue_OUTPUT_PATH})
+  if(NOT EXISTS "${glue_OUTPUT_PATH}.cpp")
+    message(FATAL_ERROR "JS glue generation failed for ${glue_INPUT_IDL}.")
+  endif()
+
+  # Create a dependency so that the glue is regenerated on edits.
+  add_custom_command(OUTPUT "${glue_OUTPUT_PATH}.cpp"
+                     COMMAND ${PYTHON_EXECUTABLE}
+                             $ENV{EMSCRIPTEN}/tools/webidl_binder.py
+                             ${glue_INPUT_IDL} ${glue_OUTPUT_PATH}
+                     DEPENDS ${draco_js_dec_idl}
+                     COMMENT "Generating ${glue_OUTPUT_PATH}.cpp."
+                     WORKING_DIRECTORY ${draco_build}
+                     VERBATIM)
+endmacro()
+
+# Wrapper for draco_add_executable() that handles the extra work necessary for
+# emscripten targets when generating JS glue:
+#
+# ~~~
+# - Set source level dependency on the C++ binding.
+# - Pre/Post link emscripten magic.
+#
+# Required args:
+# - GLUE_PATH: Base path for glue file. Used to generate .cpp and .js files.
+# - PRE_LINK_JS_SOURCES: em_link_pre_js() source files.
+# - POST_LINK_JS_SOURCES: em_link_post_js() source files.
+# Optional args:
+# - FEATURES: Draco preprocessor symbols forwarded to draco_enable_feature()
+#   for the new target.
+# ~~~
+macro(draco_add_emscripten_executable)
+  unset(emexe_NAME)
+  unset(emexe_FEATURES)
+  unset(emexe_SOURCES)
+  unset(emexe_DEFINES)
+  unset(emexe_INCLUDES)
+  unset(emexe_LINK_FLAGS)
+  set(optional_args)
+  set(single_value_args NAME GLUE_PATH)
+  set(multi_value_args SOURCES DEFINES FEATURES INCLUDES LINK_FLAGS
+                       PRE_LINK_JS_SOURCES POST_LINK_JS_SOURCES)
+
+  cmake_parse_arguments(emexe "${optional_args}" "${single_value_args}"
+                        "${multi_value_args}" ${ARGN})
+
+  if(NOT
+     (emexe_GLUE_PATH
+      AND emexe_POST_LINK_JS_SOURCES
+      AND emexe_PRE_LINK_JS_SOURCES))
+    message(FATAL_ERROR
+            "draco_add_emscripten_executable: GLUE_PATH PRE_LINK_JS_SOURCES "
+            "POST_LINK_JS_SOURCES args required.")
+  endif()
+
+  if(DRACO_VERBOSE GREATER 1)
+    message("--------- draco_add_emscripten_executable ---------\n"
+            "emexe_NAME=${emexe_NAME}\n"
+            "emexe_SOURCES=${emexe_SOURCES}\n"
+            "emexe_DEFINES=${emexe_DEFINES}\n"
+            "emexe_INCLUDES=${emexe_INCLUDES}\n"
+            "emexe_LINK_FLAGS=${emexe_LINK_FLAGS}\n"
+            "emexe_GLUE_PATH=${emexe_GLUE_PATH}\n"
+            "emexe_FEATURES=${emexe_FEATURES}\n"
+            "emexe_PRE_LINK_JS_SOURCES=${emexe_PRE_LINK_JS_SOURCES}\n"
+            "emexe_POST_LINK_JS_SOURCES=${emexe_POST_LINK_JS_SOURCES}\n"
+            "----------------------------------------------------\n")
+  endif()
+
+  # The Emscripten linker needs the C++ flags in addition to whatever has been
+  # passed in with the target.
+ list(APPEND emexe_LINK_FLAGS ${DRACO_CXX_FLAGS}) + + if(DRACO_GLTF) + draco_add_executable(NAME + ${emexe_NAME} + OUTPUT_NAME + ${emexe_NAME}_gltf + SOURCES + ${emexe_SOURCES} + DEFINES + ${emexe_DEFINES} + INCLUDES + ${emexe_INCLUDES} + LINK_FLAGS + ${emexe_LINK_FLAGS}) + else() + draco_add_executable(NAME ${emexe_NAME} SOURCES ${emexe_SOURCES} DEFINES + ${emexe_DEFINES} INCLUDES ${emexe_INCLUDES} LINK_FLAGS + ${emexe_LINK_FLAGS}) + endif() + + foreach(feature ${emexe_FEATURES}) + draco_enable_feature(FEATURE ${feature} TARGETS ${emexe_NAME}) + endforeach() + + set_property(SOURCE ${emexe_SOURCES} + APPEND + PROPERTY OBJECT_DEPENDS "${emexe_GLUE_PATH}.cpp") + em_link_pre_js(${emexe_NAME} ${emexe_PRE_LINK_JS_SOURCES}) + em_link_post_js(${emexe_NAME} "${emexe_GLUE_PATH}.js" + ${emexe_POST_LINK_JS_SOURCES}) +endmacro() diff --git a/contrib/draco/cmake/draco_features.cmake b/contrib/draco/cmake/draco_features.cmake new file mode 100644 index 000000000..be444bf24 --- /dev/null +++ b/contrib/draco/cmake/draco_features.cmake @@ -0,0 +1,63 @@ +if(DRACO_CMAKE_DRACO_FEATURES_CMAKE_) + return() +endif() +set(DRACO_CMAKE_DRACO_FEATURES_CMAKE_ 1) + +set(draco_features_file_name "${draco_build_dir}/draco/draco_features.h") +set(draco_features_list) + +# Macro that handles tracking of Draco preprocessor symbols for the purpose of +# producing draco_features.h. +# +# draco_enable_feature(FEATURE [TARGETS ]) FEATURE +# is required. It should be a Draco preprocessor symbol. TARGETS is optional. It +# can be one or more draco targets. +# +# When the TARGETS argument is not present the preproc symbol is added to +# draco_features.h. When it is draco_features.h is unchanged, and +# target_compile_options() is called for each target specified. +macro(draco_enable_feature) + set(def_flags) + set(def_single_arg_opts FEATURE) + set(def_multi_arg_opts TARGETS) + cmake_parse_arguments(DEF "${def_flags}" "${def_single_arg_opts}" + "${def_multi_arg_opts}" ${ARGN}) + if("${DEF_FEATURE}" STREQUAL "") + message(FATAL_ERROR "Empty FEATURE passed to draco_enable_feature().") + endif() + + # Do nothing/return early if $DEF_FEATURE is already in the list. + list(FIND draco_features_list ${DEF_FEATURE} df_index) + if(NOT df_index EQUAL -1) + return() + endif() + + list(LENGTH DEF_TARGETS df_targets_list_length) + if(${df_targets_list_length} EQUAL 0) + list(APPEND draco_features_list ${DEF_FEATURE}) + else() + foreach(target ${DEF_TARGETS}) + target_compile_definitions(${target} PRIVATE ${DEF_FEATURE}) + endforeach() + endif() +endmacro() + +# Function for generating draco_features.h. +function(draco_generate_features_h) + file(WRITE "${draco_features_file_name}.new" + "// GENERATED FILE -- DO NOT EDIT\n\n" "#ifndef DRACO_FEATURES_H_\n" + "#define DRACO_FEATURES_H_\n\n") + + foreach(feature ${draco_features_list}) + file(APPEND "${draco_features_file_name}.new" "#define ${feature}\n") + endforeach() + + file(APPEND "${draco_features_file_name}.new" + "\n#endif // DRACO_FEATURES_H_") + + # Will replace ${draco_features_file_name} only if the file content has + # changed. This prevents forced Draco rebuilds after CMake runs. 
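+  # For example, with DRACO_GLTF enabled the generated header contains lines
+  # such as "#define DRACO_MESH_COMPRESSION_SUPPORTED" (illustrative; the
+  # exact set depends on the features collected in $draco_features_list).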
+ configure_file("${draco_features_file_name}.new" + "${draco_features_file_name}") + file(REMOVE "${draco_features_file_name}.new") +endfunction() diff --git a/contrib/draco/cmake/draco_flags.cmake b/contrib/draco/cmake/draco_flags.cmake new file mode 100644 index 000000000..cb9d489e6 --- /dev/null +++ b/contrib/draco/cmake/draco_flags.cmake @@ -0,0 +1,238 @@ +if(DRACO_CMAKE_DRACO_FLAGS_CMAKE_) + return() +endif() # DRACO_CMAKE_DRACO_FLAGS_CMAKE_ +set(DRACO_CMAKE_DRACO_FLAGS_CMAKE_ 1) + +include(CheckCXXCompilerFlag) +include(CheckCXXSourceCompiles) + +# Adds compiler flags specified by FLAGS to the sources specified by SOURCES: +# +# draco_set_compiler_flags_for_sources(SOURCES FLAGS ) +macro(draco_set_compiler_flags_for_sources) + unset(compiler_SOURCES) + unset(compiler_FLAGS) + unset(optional_args) + unset(single_value_args) + set(multi_value_args SOURCES FLAGS) + cmake_parse_arguments(compiler "${optional_args}" "${single_value_args}" + "${multi_value_args}" ${ARGN}) + + if(NOT (compiler_SOURCES AND compiler_FLAGS)) + draco_die("draco_set_compiler_flags_for_sources: SOURCES and " + "FLAGS required.") + endif() + + set_source_files_properties(${compiler_SOURCES} PROPERTIES COMPILE_FLAGS + ${compiler_FLAGS}) + + if(DRACO_VERBOSE GREATER 1) + foreach(source ${compiler_SOURCES}) + foreach(flag ${compiler_FLAGS}) + message("draco_set_compiler_flags_for_sources: source:${source} " + "flag:${flag}") + endforeach() + endforeach() + endif() +endmacro() + +# Tests compiler flags stored in list(s) specified by FLAG_LIST_VAR_NAMES, adds +# flags to $DRACO_CXX_FLAGS when tests pass. Terminates configuration if +# FLAG_REQUIRED is specified and any flag check fails. +# +# ~~~ +# draco_test_cxx_flag(> +# [FLAG_REQUIRED]) +# ~~~ +macro(draco_test_cxx_flag) + unset(cxx_test_FLAG_LIST_VAR_NAMES) + unset(cxx_test_FLAG_REQUIRED) + unset(single_value_args) + set(optional_args FLAG_REQUIRED) + set(multi_value_args FLAG_LIST_VAR_NAMES) + cmake_parse_arguments(cxx_test "${optional_args}" "${single_value_args}" + "${multi_value_args}" ${ARGN}) + + if(NOT cxx_test_FLAG_LIST_VAR_NAMES) + draco_die("draco_test_cxx_flag: FLAG_LIST_VAR_NAMES required") + endif() + + unset(cxx_flags) + foreach(list_var ${cxx_test_FLAG_LIST_VAR_NAMES}) + if(DRACO_VERBOSE) + message("draco_test_cxx_flag: adding ${list_var} to cxx_flags") + endif() + list(APPEND cxx_flags ${${list_var}}) + endforeach() + + if(DRACO_VERBOSE) + message("CXX test: all flags: ${cxx_flags}") + endif() + + unset(all_cxx_flags) + list(APPEND all_cxx_flags ${DRACO_CXX_FLAGS} ${cxx_flags}) + + # Turn off output from check_cxx_source_compiles. Print status directly + # instead since the logging messages from check_cxx_source_compiles can be + # quite confusing. + set(CMAKE_REQUIRED_QUIET TRUE) + + # Run the actual compile test. + unset(draco_all_cxx_flags_pass CACHE) + message("--- Running combined CXX flags test, flags: ${all_cxx_flags}") + check_cxx_compiler_flag("${all_cxx_flags}" draco_all_cxx_flags_pass) + + if(cxx_test_FLAG_REQUIRED AND NOT draco_all_cxx_flags_pass) + draco_die("Flag test failed for required flag(s): " + "${all_cxx_flags} and FLAG_REQUIRED specified.") + endif() + + if(draco_all_cxx_flags_pass) + # Test passed: update the global flag list used by the draco target creation + # wrappers. 
+ set(DRACO_CXX_FLAGS ${cxx_flags}) + list(REMOVE_DUPLICATES DRACO_CXX_FLAGS) + + if(DRACO_VERBOSE) + message("DRACO_CXX_FLAGS=${DRACO_CXX_FLAGS}") + endif() + + message("--- Passed combined CXX flags test") + else() + message("--- Failed combined CXX flags test, testing flags individually.") + + if(cxx_flags) + message("--- Testing flags from $cxx_flags: " "${cxx_flags}") + foreach(cxx_flag ${cxx_flags}) + # Since 3.17.0 check_cxx_compiler_flag() sets a normal variable at + # parent scope while check_cxx_source_compiles() continues to set an + # internal cache variable, so we unset both to avoid the failure / + # success state persisting between checks. This has been fixed in newer + # CMake releases, but 3.17 is pretty common: we will need this to avoid + # weird build breakages while the fix propagates. + unset(cxx_flag_test_passed) + unset(cxx_flag_test_passed CACHE) + message("--- Testing flag: ${cxx_flag}") + check_cxx_compiler_flag("${cxx_flag}" cxx_flag_test_passed) + + if(cxx_flag_test_passed) + message("--- Passed test for ${cxx_flag}") + else() + list(REMOVE_ITEM cxx_flags ${cxx_flag}) + message("--- Failed test for ${cxx_flag}, flag removed.") + endif() + endforeach() + + set(DRACO_CXX_FLAGS ${cxx_flags}) + endif() + endif() + + if(DRACO_CXX_FLAGS) + list(REMOVE_DUPLICATES DRACO_CXX_FLAGS) + endif() +endmacro() + +# Tests executable linker flags stored in list specified by FLAG_LIST_VAR_NAME, +# adds flags to $DRACO_EXE_LINKER_FLAGS when test passes. Terminates +# configuration when flag check fails. draco_set_cxx_flags() must be called +# before calling this macro because it assumes $DRACO_CXX_FLAGS contains only +# valid CXX flags. +# +# draco_test_exe_linker_flag() +macro(draco_test_exe_linker_flag) + unset(link_FLAG_LIST_VAR_NAME) + unset(optional_args) + unset(multi_value_args) + set(single_value_args FLAG_LIST_VAR_NAME) + cmake_parse_arguments(link "${optional_args}" "${single_value_args}" + "${multi_value_args}" ${ARGN}) + + if(NOT link_FLAG_LIST_VAR_NAME) + draco_die("draco_test_link_flag: FLAG_LIST_VAR_NAME required") + endif() + + draco_set_and_stringify(DEST linker_flags SOURCE_VARS + ${link_FLAG_LIST_VAR_NAME}) + + if(DRACO_VERBOSE) + message("EXE LINKER test: all flags: ${linker_flags}") + endif() + + # Tests of $DRACO_CXX_FLAGS have already passed. Include them with the linker + # test. + draco_set_and_stringify(DEST CMAKE_REQUIRED_FLAGS SOURCE_VARS DRACO_CXX_FLAGS) + + # Cache the global exe linker flags. + if(CMAKE_EXE_LINKER_FLAGS) + set(cached_CMAKE_EXE_LINKER_FLAGS ${CMAKE_EXE_LINKER_FLAGS}) + draco_set_and_stringify(DEST CMAKE_EXE_LINKER_FLAGS SOURCE ${linker_flags}) + endif() + + draco_set_and_stringify(DEST CMAKE_EXE_LINKER_FLAGS SOURCE ${linker_flags} + ${CMAKE_EXE_LINKER_FLAGS}) + + # Turn off output from check_cxx_source_compiles. Print status directly + # instead since the logging messages from check_cxx_source_compiles can be + # quite confusing. + set(CMAKE_REQUIRED_QUIET TRUE) + + message("--- Running EXE LINKER test for flags: ${linker_flags}") + + unset(linker_flag_test_passed CACHE) + set(draco_cxx_main "\nint main() { return 0; }") + check_cxx_source_compiles("${draco_cxx_main}" linker_flag_test_passed) + + if(NOT linker_flag_test_passed) + draco_die("EXE LINKER test failed.") + endif() + + message("--- Passed EXE LINKER flag test.") + + # Restore cached global exe linker flags. 
+ if(cached_CMAKE_EXE_LINKER_FLAGS) + set(CMAKE_EXE_LINKER_FLAGS ${cached_CMAKE_EXE_LINKER_FLAGS}) + else() + unset(CMAKE_EXE_LINKER_FLAGS) + endif() +endmacro() + +# Runs the draco compiler tests. This macro builds up the list of list var(s) +# that is passed to draco_test_cxx_flag(). +# +# Note: draco_set_build_definitions() must be called before this macro. +macro(draco_set_cxx_flags) + unset(cxx_flag_lists) + + if(CMAKE_CXX_COMPILER_ID MATCHES "Clang|GNU") + list(APPEND cxx_flag_lists draco_base_cxx_flags) + endif() + + # Append clang flags after the base set to allow -Wno* overrides to take + # effect. Some of the base flags may enable a large set of warnings, e.g., + # -Wall. + if(CMAKE_CXX_COMPILER_ID MATCHES "Clang") + list(APPEND cxx_flag_lists draco_clang_cxx_flags) + endif() + + if(MSVC) + list(APPEND cxx_flag_lists draco_msvc_cxx_flags) + endif() + + draco_set_and_stringify(DEST cxx_flags SOURCE_VARS ${cxx_flag_lists}) + if(DRACO_VERBOSE) + message("draco_set_cxx_flags: internal CXX flags: ${cxx_flags}") + endif() + + if(DRACO_CXX_FLAGS) + list(APPEND cxx_flag_lists DRACO_CXX_FLAGS) + if(DRACO_VERBOSE) + message("draco_set_cxx_flags: user CXX flags: ${DRACO_CXX_FLAGS}") + endif() + endif() + + draco_set_and_stringify(DEST cxx_flags SOURCE_VARS ${cxx_flag_lists}) + + if(cxx_flags) + draco_test_cxx_flag(FLAG_LIST_VAR_NAMES ${cxx_flag_lists}) + endif() +endmacro() diff --git a/contrib/draco/cmake/draco_helpers.cmake b/contrib/draco/cmake/draco_helpers.cmake new file mode 100644 index 000000000..0b3b804cf --- /dev/null +++ b/contrib/draco/cmake/draco_helpers.cmake @@ -0,0 +1,110 @@ +if(DRACO_CMAKE_DRACO_HELPERS_CMAKE_) + return() +endif() # DRACO_CMAKE_DRACO_HELPERS_CMAKE_ +set(DRACO_CMAKE_DRACO_HELPERS_CMAKE_ 1) + +# Kills build generation using message(FATAL_ERROR) and outputs all data passed +# to the console via use of $ARGN. +macro(draco_die) + message(FATAL_ERROR ${ARGN}) +endmacro() + +# Converts semi-colon delimited list variable(s) to string. Output is written to +# variable supplied via the DEST parameter. Input is from an expanded variable +# referenced by SOURCE and/or variable(s) referenced by SOURCE_VARS. +macro(draco_set_and_stringify) + set(optional_args) + set(single_value_args DEST SOURCE_VAR) + set(multi_value_args SOURCE SOURCE_VARS) + cmake_parse_arguments(sas "${optional_args}" "${single_value_args}" + "${multi_value_args}" ${ARGN}) + + if(NOT sas_DEST OR NOT (sas_SOURCE OR sas_SOURCE_VARS)) + draco_die("draco_set_and_stringify: DEST and at least one of SOURCE " + "SOURCE_VARS required.") + endif() + + unset(${sas_DEST}) + + if(sas_SOURCE) + # $sas_SOURCE is one or more expanded variables, just copy the values to + # $sas_DEST. + set(${sas_DEST} "${sas_SOURCE}") + endif() + + if(sas_SOURCE_VARS) + # $sas_SOURCE_VARS is one or more variable names. Each iteration expands a + # variable and appends it to $sas_DEST. + foreach(source_var ${sas_SOURCE_VARS}) + set(${sas_DEST} "${${sas_DEST}} ${${source_var}}") + endforeach() + + # Because $sas_DEST can be empty when entering this scope leading whitespace + # can be introduced to $sas_DEST on the first iteration of the above loop. + # Remove it: + string(STRIP "${${sas_DEST}}" ${sas_DEST}) + endif() + + # Lists in CMake are simply semicolon delimited strings, so stringification is + # just a find and replace of the semicolon. 
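+  # For example, a SOURCE_VARS variable holding the list "a;b;c" ends up in
+  # ${sas_DEST} as the single string "a b c".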
+ string(REPLACE ";" " " ${sas_DEST} "${${sas_DEST}}") + + if(DRACO_VERBOSE GREATER 1) + message("draco_set_and_stringify: ${sas_DEST}=${${sas_DEST}}") + endif() +endmacro() + +# Creates a dummy source file in $DRACO_GENERATED_SOURCES_DIRECTORY and adds it +# to the specified target. Optionally adds its path to a list variable. +# +# draco_create_dummy_source_file( BASENAME > +# [LISTVAR ]) +macro(draco_create_dummy_source_file) + set(optional_args) + set(single_value_args TARGET BASENAME LISTVAR) + set(multi_value_args) + cmake_parse_arguments(cdsf "${optional_args}" "${single_value_args}" + "${multi_value_args}" ${ARGN}) + + if(NOT cdsf_TARGET OR NOT cdsf_BASENAME) + draco_die("draco_create_dummy_source_file: TARGET and BASENAME required.") + endif() + + if(NOT DRACO_GENERATED_SOURCES_DIRECTORY) + set(DRACO_GENERATED_SOURCES_DIRECTORY "${draco_build}/gen_src") + endif() + + set(dummy_source_dir "${DRACO_GENERATED_SOURCES_DIRECTORY}") + set(dummy_source_file + "${dummy_source_dir}/draco_${cdsf_TARGET}_${cdsf_BASENAME}.cc") + set(dummy_source_code + "// Generated file. DO NOT EDIT!\n" + "// C++ source file created for target ${cdsf_TARGET}.\n" + "void draco_${cdsf_TARGET}_${cdsf_BASENAME}_dummy_function(void)\;\n" + "void draco_${cdsf_TARGET}_${cdsf_BASENAME}_dummy_function(void) {}\n") + file(WRITE "${dummy_source_file}" ${dummy_source_code}) + + target_sources(${cdsf_TARGET} PRIVATE ${dummy_source_file}) + + if(cdsf_LISTVAR) + list(APPEND ${cdsf_LISTVAR} "${dummy_source_file}") + endif() +endmacro() + +# Loads the version string from $draco_source/draco/version.h and sets +# $DRACO_VERSION. +macro(draco_load_version_info) + file(STRINGS "${draco_src_root}/core/draco_version.h" version_file_strings) + foreach(str ${version_file_strings}) + if(str MATCHES "char kDracoVersion") + string(FIND "${str}" "\"" open_quote_pos) + string(FIND "${str}" ";" semicolon_pos) + math(EXPR open_quote_pos "${open_quote_pos} + 1") + math(EXPR close_quote_pos "${semicolon_pos} - 1") + math(EXPR version_string_length "${close_quote_pos} - ${open_quote_pos}") + string(SUBSTRING "${str}" ${open_quote_pos} ${version_string_length} + DRACO_VERSION) + break() + endif() + endforeach() +endmacro() diff --git a/contrib/draco/cmake/draco_install.cmake b/contrib/draco/cmake/draco_install.cmake new file mode 100644 index 000000000..5c63ecb4a --- /dev/null +++ b/contrib/draco/cmake/draco_install.cmake @@ -0,0 +1,79 @@ +if(DRACO_CMAKE_DRACO_INSTALL_CMAKE_) + return() +endif() # DRACO_CMAKE_DRACO_INSTALL_CMAKE_ +set(DRACO_CMAKE_DRACO_INSTALL_CMAKE_ 1) + +# Sets up the draco install targets. Must be called after the static library +# target is created. 
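+#
+# After installation the library can be consumed through the generated
+# pkg-config file. A minimal consumer-side sketch, assuming the install
+# prefix's pkgconfig directory is on PKG_CONFIG_PATH (illustrative):
+#
+# ~~~
+#   find_package(PkgConfig REQUIRED)
+#   pkg_check_modules(DRACO REQUIRED draco)
+#   # DRACO_INCLUDE_DIRS and DRACO_LIBRARIES are then available.
+# ~~~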
+macro(draco_setup_install_target) + include(GNUInstallDirs) + + # pkg-config: draco.pc + set(prefix "${CMAKE_INSTALL_PREFIX}") + set(exec_prefix "\${prefix}") + set(libdir "\${prefix}/${CMAKE_INSTALL_LIBDIR}") + set(includedir "\${prefix}/${CMAKE_INSTALL_INCLUDEDIR}") + set(draco_lib_name "draco") + + configure_file("${draco_root}/cmake/draco.pc.template" + "${draco_build}/draco.pc" @ONLY NEWLINE_STYLE UNIX) + install(FILES "${draco_build}/draco.pc" + DESTINATION "${prefix}/${CMAKE_INSTALL_LIBDIR}/pkgconfig") + + # CMake config: draco-config.cmake + set(DRACO_INCLUDE_DIRS "${prefix}/${CMAKE_INSTALL_INCLUDEDIR}") + configure_file("${draco_root}/cmake/draco-config.cmake.template" + "${draco_build}/draco-config.cmake" @ONLY NEWLINE_STYLE UNIX) + install( + FILES "${draco_build}/draco-config.cmake" + DESTINATION "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_DATAROOTDIR}/cmake") + + foreach(file ${draco_sources}) + if(file MATCHES "h$") + list(APPEND draco_api_includes ${file}) + endif() + endforeach() + + # Strip $draco_src_root from the file paths: we need to install relative to + # $include_directory. + list(TRANSFORM draco_api_includes REPLACE "${draco_src_root}/" "") + set(include_directory "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR}") + + foreach(draco_api_include ${draco_api_includes}) + get_filename_component(file_directory ${draco_api_include} DIRECTORY) + set(target_directory "${include_directory}/draco/${file_directory}") + install(FILES ${draco_src_root}/${draco_api_include} + DESTINATION "${target_directory}") + endforeach() + + install( + FILES "${draco_build}/draco/draco_features.h" + DESTINATION "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR}/draco/") + + install(TARGETS draco_decoder DESTINATION + "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_BINDIR}") + install(TARGETS draco_encoder DESTINATION + "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_BINDIR}") + + if(WIN32) + install(TARGETS draco DESTINATION + "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}") + else() + install(TARGETS draco_static DESTINATION + "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}") + if(BUILD_SHARED_LIBS) + install(TARGETS draco_shared DESTINATION + "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}") + endif() + endif() + + if(DRACO_UNITY_PLUGIN) + install(TARGETS dracodec_unity DESTINATION + "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}") + endif() + if(DRACO_MAYA_PLUGIN) + install(TARGETS draco_maya_wrapper DESTINATION + "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}") + endif() + +endmacro() diff --git a/contrib/draco/cmake/draco_intrinsics.cmake b/contrib/draco/cmake/draco_intrinsics.cmake new file mode 100644 index 000000000..9011c0de5 --- /dev/null +++ b/contrib/draco/cmake/draco_intrinsics.cmake @@ -0,0 +1,96 @@ +if(DRACO_CMAKE_DRACO_INTRINSICS_CMAKE_) + return() +endif() # DRACO_CMAKE_DRACO_INTRINSICS_CMAKE_ +set(DRACO_CMAKE_DRACO_INTRINSICS_CMAKE_ 1) + +# Returns the compiler flag for the SIMD intrinsics suffix specified by the +# SUFFIX argument via the variable specified by the VARIABLE argument: +# draco_get_intrinsics_flag_for_suffix(SUFFIX VARIABLE ) +macro(draco_get_intrinsics_flag_for_suffix) + unset(intrinsics_SUFFIX) + unset(intrinsics_VARIABLE) + unset(optional_args) + unset(multi_value_args) + set(single_value_args SUFFIX VARIABLE) + cmake_parse_arguments(intrinsics "${optional_args}" "${single_value_args}" + "${multi_value_args}" ${ARGN}) + + if(NOT (intrinsics_SUFFIX AND intrinsics_VARIABLE)) + message(FATAL_ERROR "draco_get_intrinsics_flag_for_suffix: SUFFIX and " + "VARIABLE 
required.") + endif() + + if(intrinsics_SUFFIX MATCHES "neon") + if(NOT MSVC) + set(${intrinsics_VARIABLE} "${DRACO_NEON_INTRINSICS_FLAG}") + endif() + elseif(intrinsics_SUFFIX MATCHES "sse4") + if(NOT MSVC) + set(${intrinsics_VARIABLE} "-msse4.1") + endif() + else() + message(FATAL_ERROR "draco_get_intrinsics_flag_for_suffix: Unknown " + "instrinics suffix: ${intrinsics_SUFFIX}") + endif() + + if(DRACO_VERBOSE GREATER 1) + message("draco_get_intrinsics_flag_for_suffix: " + "suffix:${intrinsics_SUFFIX} flag:${${intrinsics_VARIABLE}}") + endif() +endmacro() + +# Processes source files specified by SOURCES and adds intrinsics flags as +# necessary: draco_process_intrinsics_sources(SOURCES ) +# +# Detects requirement for intrinsics flags using source file name suffix. +# Currently supports only SSE4.1. +macro(draco_process_intrinsics_sources) + unset(arg_TARGET) + unset(arg_SOURCES) + unset(optional_args) + set(single_value_args TARGET) + set(multi_value_args SOURCES) + cmake_parse_arguments(arg "${optional_args}" "${single_value_args}" + "${multi_value_args}" ${ARGN}) + if(NOT (arg_TARGET AND arg_SOURCES)) + message(FATAL_ERROR "draco_process_intrinsics_sources: TARGET and " + "SOURCES required.") + endif() + + if(DRACO_ENABLE_SSE4_1 AND draco_have_sse4) + unset(sse4_sources) + list(APPEND sse4_sources ${arg_SOURCES}) + + list(FILTER sse4_sources INCLUDE REGEX + "${draco_sse4_source_file_suffix}$") + + if(sse4_sources) + unset(sse4_flags) + draco_get_intrinsics_flag_for_suffix(SUFFIX + ${draco_sse4_source_file_suffix} + VARIABLE sse4_flags) + if(sse4_flags) + draco_set_compiler_flags_for_sources(SOURCES ${sse4_sources} FLAGS + ${sse4_flags}) + endif() + endif() + endif() + + if(DRACO_ENABLE_NEON AND draco_have_neon) + unset(neon_sources) + list(APPEND neon_sources ${arg_SOURCES}) + list(FILTER neon_sources INCLUDE REGEX + "${draco_neon_source_file_suffix}$") + + if(neon_sources AND DRACO_NEON_INTRINSICS_FLAG) + unset(neon_flags) + draco_get_intrinsics_flag_for_suffix(SUFFIX + ${draco_neon_source_file_suffix} + VARIABLE neon_flags) + if(neon_flags) + draco_set_compiler_flags_for_sources(SOURCES ${neon_sources} FLAGS + ${neon_flags}) + endif() + endif() + endif() +endmacro() diff --git a/contrib/draco/cmake/draco_options.cmake b/contrib/draco/cmake/draco_options.cmake new file mode 100644 index 000000000..832bfb69f --- /dev/null +++ b/contrib/draco/cmake/draco_options.cmake @@ -0,0 +1,239 @@ +if(DRACO_CMAKE_DRACO_OPTIONS_CMAKE_) + return() +endif() # DRACO_CMAKE_DRACO_OPTIONS_CMAKE_ +set(DRACO_CMAKE_DRACO_OPTIONS_CMAKE_) + +set(draco_features_file_name "${draco_build}/draco/draco_features.h") +set(draco_features_list) + +# Simple wrapper for CMake's builtin option command that tracks draco's build +# options in the list variable $draco_options. 
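+#
+# For example, mirroring one of the defaults registered below:
+#
+# ~~~
+#   draco_option(NAME DRACO_TESTS HELPSTRING "Enables tests." VALUE OFF)
+# ~~~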
+macro(draco_option) + unset(option_NAME) + unset(option_HELPSTRING) + unset(option_VALUE) + unset(optional_args) + unset(multi_value_args) + set(single_value_args NAME HELPSTRING VALUE) + cmake_parse_arguments(option "${optional_args}" "${single_value_args}" + "${multi_value_args}" ${ARGN}) + + if(NOT (option_NAME AND option_HELPSTRING AND DEFINED option_VALUE)) + message(FATAL_ERROR "draco_option: NAME HELPSTRING and VALUE required.") + endif() + + option(${option_NAME} ${option_HELPSTRING} ${option_VALUE}) + + if(DRACO_VERBOSE GREATER 2) + message("--------- draco_option ---------\n" "option_NAME=${option_NAME}\n" + "option_HELPSTRING=${option_HELPSTRING}\n" + "option_VALUE=${option_VALUE}\n" + "------------------------------------------\n") + endif() + + list(APPEND draco_options ${option_NAME}) + list(REMOVE_DUPLICATES draco_options) +endmacro() + +# Dumps the $draco_options list via CMake message command. +macro(draco_dump_options) + foreach(option_name ${draco_options}) + message("${option_name}: ${${option_name}}") + endforeach() +endmacro() + +# Set default options. +macro(draco_set_default_options) + draco_option(NAME DRACO_FAST HELPSTRING "Try to build faster libs." VALUE OFF) + draco_option(NAME DRACO_JS_GLUE HELPSTRING + "Enable JS Glue and JS targets when using Emscripten." VALUE ON) + draco_option(NAME DRACO_IE_COMPATIBLE HELPSTRING + "Enable support for older IE builds when using Emscripten." VALUE + OFF) + draco_option(NAME DRACO_MESH_COMPRESSION HELPSTRING "Enable mesh compression." + VALUE ON) + draco_option(NAME DRACO_POINT_CLOUD_COMPRESSION HELPSTRING + "Enable point cloud compression." VALUE ON) + draco_option(NAME DRACO_PREDICTIVE_EDGEBREAKER HELPSTRING + "Enable predictive edgebreaker." VALUE ON) + draco_option(NAME DRACO_STANDARD_EDGEBREAKER HELPSTRING + "Enable stand edgebreaker." VALUE ON) + draco_option(NAME DRACO_BACKWARDS_COMPATIBILITY HELPSTRING + "Enable backwards compatibility." VALUE ON) + draco_option(NAME DRACO_DECODER_ATTRIBUTE_DEDUPLICATION HELPSTRING + "Enable attribute deduping." VALUE OFF) + draco_option(NAME DRACO_TESTS HELPSTRING "Enables tests." VALUE OFF) + draco_option(NAME DRACO_WASM HELPSTRING "Enables WASM support." VALUE OFF) + draco_option(NAME DRACO_UNITY_PLUGIN HELPSTRING + "Build plugin library for Unity." VALUE OFF) + draco_option(NAME DRACO_ANIMATION_ENCODING HELPSTRING "Enable animation." + VALUE OFF) + draco_option(NAME DRACO_GLTF HELPSTRING "Support GLTF." VALUE OFF) + draco_option(NAME DRACO_MAYA_PLUGIN HELPSTRING + "Build plugin library for Maya." VALUE OFF) + draco_check_deprecated_options() +endmacro() + +# Warns when a deprecated option is used and sets the option that replaced it. +macro(draco_handle_deprecated_option) + unset(option_OLDNAME) + unset(option_NEWNAME) + unset(optional_args) + unset(multi_value_args) + set(single_value_args OLDNAME NEWNAME) + cmake_parse_arguments(option "${optional_args}" "${single_value_args}" + "${multi_value_args}" ${ARGN}) + + if("${${option_OLDNAME}}") + message(WARNING "${option_OLDNAME} is deprecated. Use ${option_NEWNAME}.") + set(${option_NEWNAME} ${${option_OLDNAME}}) + endif() +endmacro() + +# Checks for use of deprecated options. 
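+# For example, configuring with ENABLE_TESTS=ON emits the warning
+# "ENABLE_TESTS is deprecated. Use DRACO_TESTS." and sets DRACO_TESTS to ON.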
+macro(draco_check_deprecated_options) + draco_handle_deprecated_option(OLDNAME ENABLE_EXTRA_SPEED NEWNAME DRACO_FAST) + draco_handle_deprecated_option(OLDNAME ENABLE_JS_GLUE NEWNAME DRACO_JS_GLUE) + draco_handle_deprecated_option(OLDNAME ENABLE_MESH_COMPRESSION NEWNAME + DRACO_MESH_COMPRESSION) + draco_handle_deprecated_option(OLDNAME ENABLE_POINT_CLOUD_COMPRESSION NEWNAME + DRACO_POINT_CLOUD_COMPRESSION) + draco_handle_deprecated_option(OLDNAME ENABLE_PREDICTIVE_EDGEBREAKER NEWNAME + DRACO_PREDICTIVE_EDGEBREAKER) + draco_handle_deprecated_option(OLDNAME ENABLE_STANDARD_EDGEBREAKER NEWNAME + DRACO_STANDARD_EDGEBREAKER) + draco_handle_deprecated_option(OLDNAME ENABLE_BACKWARDS_COMPATIBILITY NEWNAME + DRACO_BACKWARDS_COMPATIBILITY) + draco_handle_deprecated_option(OLDNAME ENABLE_DECODER_ATTRIBUTE_DEDUPLICATION + NEWNAME DRACO_DECODER_ATTRIBUTE_DEDUPLICATION) + draco_handle_deprecated_option(OLDNAME ENABLE_TESTS NEWNAME DRACO_TESTS) + draco_handle_deprecated_option(OLDNAME ENABLE_WASM NEWNAME DRACO_WASM) + draco_handle_deprecated_option(OLDNAME BUILD_UNITY_PLUGIN NEWNAME + DRACO_UNITY_PLUGIN) + draco_handle_deprecated_option(OLDNAME BUILD_ANIMATION_ENCODING NEWNAME + DRACO_ANIMATION_ENCODING) + draco_handle_deprecated_option(OLDNAME BUILD_FOR_GLTF NEWNAME DRACO_GLTF) + draco_handle_deprecated_option(OLDNAME BUILD_MAYA_PLUGIN NEWNAME + DRACO_MAYA_PLUGIN) + draco_handle_deprecated_option(OLDNAME BUILD_USD_PLUGIN NEWNAME + BUILD_SHARED_LIBS) + +endmacro() + +# Macro for setting Draco features based on user configuration. Features enabled +# by this macro are Draco global. +macro(draco_set_optional_features) + if(DRACO_GLTF) + # Override settings when building for GLTF. + draco_enable_feature(FEATURE "DRACO_MESH_COMPRESSION_SUPPORTED") + draco_enable_feature(FEATURE "DRACO_NORMAL_ENCODING_SUPPORTED") + draco_enable_feature(FEATURE "DRACO_STANDARD_EDGEBREAKER_SUPPORTED") + else() + if(DRACO_POINT_CLOUD_COMPRESSION) + draco_enable_feature(FEATURE "DRACO_POINT_CLOUD_COMPRESSION_SUPPORTED") + endif() + if(DRACO_MESH_COMPRESSION) + draco_enable_feature(FEATURE "DRACO_MESH_COMPRESSION_SUPPORTED") + draco_enable_feature(FEATURE "DRACO_NORMAL_ENCODING_SUPPORTED") + + if(DRACO_STANDARD_EDGEBREAKER) + draco_enable_feature(FEATURE "DRACO_STANDARD_EDGEBREAKER_SUPPORTED") + endif() + if(DRACO_PREDICTIVE_EDGEBREAKER) + draco_enable_feature(FEATURE "DRACO_PREDICTIVE_EDGEBREAKER_SUPPORTED") + endif() + endif() + + if(DRACO_BACKWARDS_COMPATIBILITY) + draco_enable_feature(FEATURE "DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED") + endif() + + + if(NOT EMSCRIPTEN) + # For now, enable deduplication for both encoder and decoder. + # TODO(ostava): Support for disabling attribute deduplication for the C++ + # decoder is planned in future releases. + draco_enable_feature(FEATURE + DRACO_ATTRIBUTE_INDICES_DEDUPLICATION_SUPPORTED) + draco_enable_feature(FEATURE + DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED) + endif() + endif() + + if(DRACO_UNITY_PLUGIN) + draco_enable_feature(FEATURE "DRACO_UNITY_PLUGIN") + set(CMAKE_POSITION_INDEPENDENT_CODE ON) + endif() + + if(DRACO_MAYA_PLUGIN) + draco_enable_feature(FEATURE "DRACO_MAYA_PLUGIN") + set(CMAKE_POSITION_INDEPENDENT_CODE ON) + endif() + +endmacro() + +# Macro that handles tracking of Draco preprocessor symbols for the purpose of +# producing draco_features.h. +# +# ~~~ +# draco_enable_feature(FEATURE [TARGETS ]) +# ~~~ +# +# FEATURE is required. It should be a Draco preprocessor symbol. TARGETS is +# optional. It can be one or more draco targets. 
+# +# When the TARGETS argument is not present the preproc symbol is added to +# draco_features.h. When it is draco_features.h is unchanged, and +# target_compile_options() is called for each target specified. +macro(draco_enable_feature) + set(def_flags) + set(def_single_arg_opts FEATURE) + set(def_multi_arg_opts TARGETS) + cmake_parse_arguments(DEF "${def_flags}" "${def_single_arg_opts}" + "${def_multi_arg_opts}" ${ARGN}) + if("${DEF_FEATURE}" STREQUAL "") + message(FATAL_ERROR "Empty FEATURE passed to draco_enable_feature().") + endif() + + # Do nothing/return early if $DEF_FEATURE is already in the list. + list(FIND draco_features_list ${DEF_FEATURE} df_index) + if(NOT df_index EQUAL -1) + return() + endif() + + list(LENGTH DEF_TARGETS df_targets_list_length) + if(${df_targets_list_length} EQUAL 0) + list(APPEND draco_features_list ${DEF_FEATURE}) + else() + foreach(target ${DEF_TARGETS}) + target_compile_definitions(${target} PRIVATE ${DEF_FEATURE}) + endforeach() + endif() +endmacro() + +# Function for generating draco_features.h. +function(draco_generate_features_h) + file(WRITE "${draco_features_file_name}.new" + "// GENERATED FILE -- DO NOT EDIT\n\n" "#ifndef DRACO_FEATURES_H_\n" + "#define DRACO_FEATURES_H_\n\n") + + foreach(feature ${draco_features_list}) + file(APPEND "${draco_features_file_name}.new" "#define ${feature}\n") + endforeach() + + file(APPEND "${draco_features_file_name}.new" + "\n#endif // DRACO_FEATURES_H_") + + # Will replace ${draco_features_file_name} only if the file content has + # changed. This prevents forced Draco rebuilds after CMake runs. + configure_file("${draco_features_file_name}.new" + "${draco_features_file_name}") + file(REMOVE "${draco_features_file_name}.new") +endfunction() + +# Sets default options for the build and processes user controlled options to +# compute enabled features. +macro(draco_setup_options) + draco_set_default_options() + draco_set_optional_features() +endmacro() diff --git a/contrib/draco/cmake/draco_sanitizer.cmake b/contrib/draco/cmake/draco_sanitizer.cmake new file mode 100644 index 000000000..ca8e23176 --- /dev/null +++ b/contrib/draco/cmake/draco_sanitizer.cmake @@ -0,0 +1,32 @@ +if(DRACO_CMAKE_DRACO_SANITIZER_CMAKE_) + return() +endif() # DRACO_CMAKE_DRACO_SANITIZER_CMAKE_ +set(DRACO_CMAKE_DRACO_SANITIZER_CMAKE_ 1) + +# Handles the details of enabling sanitizers. +macro(draco_configure_sanitizer) + if(DRACO_SANITIZE AND NOT MSVC) + if(CMAKE_CXX_COMPILER_ID MATCHES "Clang") + if(DRACO_SANITIZE MATCHES "cfi") + list(APPEND DRACO_CXX_FLAGS "-flto" "-fno-sanitize-trap=cfi") + list(APPEND DRACO_EXE_LINKER_FLAGS "-flto" "-fno-sanitize-trap=cfi" + "-fuse-ld=gold") + endif() + + if(${CMAKE_SIZEOF_VOID_P} EQUAL 4 + AND DRACO_SANITIZE MATCHES "integer|undefined") + list(APPEND DRACO_EXE_LINKER_FLAGS "--rtlib=compiler-rt" "-lgcc_s") + endif() + endif() + + list(APPEND DRACO_CXX_FLAGS "-fsanitize=${DRACO_SANITIZE}") + list(APPEND DRACO_EXE_LINKER_FLAGS "-fsanitize=${DRACO_SANITIZE}") + + # Make sanitizer callstacks accurate. 
+ list(APPEND DRACO_CXX_FLAGS "-fno-omit-frame-pointer" + "-fno-optimize-sibling-calls") + + draco_test_cxx_flag(FLAG_LIST_VAR_NAMES DRACO_CXX_FLAGS FLAG_REQUIRED) + draco_test_exe_linker_flag(FLAG_LIST_VAR_NAME DRACO_EXE_LINKER_FLAGS) + endif() +endmacro() diff --git a/contrib/draco/cmake/draco_targets.cmake b/contrib/draco/cmake/draco_targets.cmake new file mode 100644 index 000000000..6dfa6a0c4 --- /dev/null +++ b/contrib/draco/cmake/draco_targets.cmake @@ -0,0 +1,349 @@ +if(DRACO_CMAKE_DRACO_TARGETS_CMAKE_) + return() +endif() # DRACO_CMAKE_DRACO_TARGETS_CMAKE_ +set(DRACO_CMAKE_DRACO_TARGETS_CMAKE_ 1) + +# Resets list variables used to track draco targets. +macro(draco_reset_target_lists) + unset(draco_targets) + unset(draco_exe_targets) + unset(draco_lib_targets) + unset(draco_objlib_targets) + unset(draco_module_targets) + unset(draco_sources) + unset(draco_test_targets) +endmacro() + +# Creates an executable target. The target name is passed as a parameter to the +# NAME argument, and the sources passed as a parameter to the SOURCES argument: +# draco_add_executable(NAME SOURCES [optional args]) +# +# Optional args: +# cmake-format: off +# - OUTPUT_NAME: Override output file basename. Target basename defaults to +# NAME. +# - TEST: Flag. Presence means treat executable as a test. +# - DEFINES: List of preprocessor macro definitions. +# - INCLUDES: list of include directories for the target. +# - COMPILE_FLAGS: list of compiler flags for the target. +# - LINK_FLAGS: List of linker flags for the target. +# - OBJLIB_DEPS: List of CMake object library target dependencies. +# - LIB_DEPS: List of CMake library dependencies. +# cmake-format: on +# +# Sources passed to this macro are added to $draco_test_sources when TEST is +# specified. Otherwise sources are added to $draco_sources. +# +# Targets passed to this macro are always added to the $draco_targets list. When +# TEST is specified targets are also added to the $draco_test_targets list. +# Otherwise targets are added to $draco_exe_targets. 
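+#
+# A representative invocation (target, source, and dependency names here are
+# illustrative; see draco_setup_test_targets() in draco_tests.cmake for real
+# call sites):
+#
+# ~~~
+#   draco_add_executable(NAME draco_decoder
+#                        SOURCES "${draco_src_root}/tools/draco_decoder.cc"
+#                        DEFINES ${draco_defines}
+#                        INCLUDES ${draco_include_paths}
+#                        LIB_DEPS draco_static)
+# ~~~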
+macro(draco_add_executable) + unset(exe_TEST) + unset(exe_TEST_DEFINES_MAIN) + unset(exe_NAME) + unset(exe_OUTPUT_NAME) + unset(exe_SOURCES) + unset(exe_DEFINES) + unset(exe_INCLUDES) + unset(exe_COMPILE_FLAGS) + unset(exe_LINK_FLAGS) + unset(exe_OBJLIB_DEPS) + unset(exe_LIB_DEPS) + set(optional_args TEST) + set(single_value_args NAME OUTPUT_NAME) + set(multi_value_args SOURCES DEFINES INCLUDES COMPILE_FLAGS LINK_FLAGS + OBJLIB_DEPS LIB_DEPS) + + cmake_parse_arguments(exe "${optional_args}" "${single_value_args}" + "${multi_value_args}" ${ARGN}) + + if(DRACO_VERBOSE GREATER 1) + message("--------- draco_add_executable ---------\n" + "exe_TEST=${exe_TEST}\n" + "exe_TEST_DEFINES_MAIN=${exe_TEST_DEFINES_MAIN}\n" + "exe_NAME=${exe_NAME}\n" + "exe_OUTPUT_NAME=${exe_OUTPUT_NAME}\n" + "exe_SOURCES=${exe_SOURCES}\n" + "exe_DEFINES=${exe_DEFINES}\n" + "exe_INCLUDES=${exe_INCLUDES}\n" + "exe_COMPILE_FLAGS=${exe_COMPILE_FLAGS}\n" + "exe_LINK_FLAGS=${exe_LINK_FLAGS}\n" + "exe_OBJLIB_DEPS=${exe_OBJLIB_DEPS}\n" + "exe_LIB_DEPS=${exe_LIB_DEPS}\n" + "------------------------------------------\n") + endif() + + if(NOT (exe_NAME AND exe_SOURCES)) + message(FATAL_ERROR "draco_add_executable: NAME and SOURCES required.") + endif() + + list(APPEND draco_targets ${exe_NAME}) + if(exe_TEST) + list(APPEND draco_test_targets ${exe_NAME}) + list(APPEND draco_test_sources ${exe_SOURCES}) + else() + list(APPEND draco_exe_targets ${exe_NAME}) + list(APPEND draco_sources ${exe_SOURCES}) + endif() + + add_executable(${exe_NAME} ${exe_SOURCES}) + + if(exe_OUTPUT_NAME) + set_target_properties(${exe_NAME} PROPERTIES OUTPUT_NAME ${exe_OUTPUT_NAME}) + endif() + + draco_process_intrinsics_sources(TARGET ${exe_NAME} SOURCES ${exe_SOURCES}) + + if(exe_DEFINES) + target_compile_definitions(${exe_NAME} PRIVATE ${exe_DEFINES}) + endif() + + if(exe_INCLUDES) + target_include_directories(${exe_NAME} PRIVATE ${exe_INCLUDES}) + endif() + + if(exe_COMPILE_FLAGS OR DRACO_CXX_FLAGS) + target_compile_options(${exe_NAME} + PRIVATE ${exe_COMPILE_FLAGS} ${DRACO_CXX_FLAGS}) + endif() + + if(exe_LINK_FLAGS OR DRACO_EXE_LINKER_FLAGS) + if(${CMAKE_VERSION} VERSION_LESS "3.13") + set(link_flags ${exe_LINK_FLAGS} ${DRACO_EXE_LINKER_FLAGS}) + set_target_properties(${exe_NAME} + PROPERTIES LINK_FLAGS ${exe_LINK_FLAGS} + ${DRACO_EXE_LINKER_FLAGS}) + else() + target_link_options(${exe_NAME} PRIVATE ${exe_LINK_FLAGS} + ${DRACO_EXE_LINKER_FLAGS}) + endif() + endif() + + if(exe_OBJLIB_DEPS) + foreach(objlib_dep ${exe_OBJLIB_DEPS}) + target_sources(${exe_NAME} PRIVATE $) + endforeach() + endif() + + if(CMAKE_THREAD_LIBS_INIT) + list(APPEND exe_LIB_DEPS ${CMAKE_THREAD_LIBS_INIT}) + endif() + + if(BUILD_SHARED_LIBS AND (MSVC OR WIN32)) + target_compile_definitions(${lib_NAME} PRIVATE "DRACO_BUILDING_DLL=0") + endif() + + if(exe_LIB_DEPS) + unset(exe_static) + if("${CMAKE_EXE_LINKER_FLAGS} ${DRACO_EXE_LINKER_FLAGS}" MATCHES "static") + set(exe_static ON) + endif() + + if(exe_static AND CMAKE_CXX_COMPILER_ID MATCHES "Clang|GNU") + # Third party dependencies can introduce dependencies on system and test + # libraries. Since the target created here is an executable, and CMake + # does not provide a method of controlling order of link dependencies, + # wrap all of the dependencies of this target in start/end group flags to + # ensure that dependencies of third party targets can be resolved when + # those dependencies happen to be resolved by dependencies of the current + # target. 
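+      # The resulting link line then has the shape
+      # "-Wl,--start-group ${exe_LIB_DEPS} -Wl,--end-group" (illustrative).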
+ list(INSERT exe_LIB_DEPS 0 -Wl,--start-group) + list(APPEND exe_LIB_DEPS -Wl,--end-group) + endif() + target_link_libraries(${exe_NAME} PRIVATE ${exe_LIB_DEPS}) + endif() +endmacro() + +# Creates a library target of the specified type. The target name is passed as a +# parameter to the NAME argument, the type as a parameter to the TYPE argument, +# and the sources passed as a parameter to the SOURCES argument: +# draco_add_library(NAME TYPE SOURCES [optional args]) +# +# Optional args: +# cmake-format: off +# - OUTPUT_NAME: Override output file basename. Target basename defaults to +# NAME. OUTPUT_NAME is ignored when BUILD_SHARED_LIBS is enabled and CMake +# is generating a build for which MSVC or WIN32 are true. This is to avoid +# output basename collisions with DLL import libraries. +# - TEST: Flag. Presence means treat library as a test. +# - DEFINES: List of preprocessor macro definitions. +# - INCLUDES: list of include directories for the target. +# - COMPILE_FLAGS: list of compiler flags for the target. +# - LINK_FLAGS: List of linker flags for the target. +# - OBJLIB_DEPS: List of CMake object library target dependencies. +# - LIB_DEPS: List of CMake library dependencies. +# - PUBLIC_INCLUDES: List of include paths to export to dependents. +# cmake-format: on +# +# Sources passed to the macro are added to the lists tracking draco sources: +# cmake-format: off +# - When TEST is specified sources are added to $draco_test_sources. +# - Otherwise sources are added to $draco_sources. +# cmake-format: on +# +# Targets passed to this macro are added to the lists tracking draco targets: +# cmake-format: off +# - Targets are always added to $draco_targets. +# - When the TEST flag is specified, targets are added to +# $draco_test_targets. +# - When TEST is not specified: +# - Libraries of type SHARED are added to $draco_dylib_targets. +# - Libraries of type OBJECT are added to $draco_objlib_targets. +# - Libraries of type STATIC are added to $draco_lib_targets. 
+# cmake-format: on +macro(draco_add_library) + unset(lib_TEST) + unset(lib_NAME) + unset(lib_OUTPUT_NAME) + unset(lib_TYPE) + unset(lib_SOURCES) + unset(lib_DEFINES) + unset(lib_INCLUDES) + unset(lib_COMPILE_FLAGS) + unset(lib_LINK_FLAGS) + unset(lib_OBJLIB_DEPS) + unset(lib_LIB_DEPS) + unset(lib_PUBLIC_INCLUDES) + unset(lib_TARGET_PROPERTIES) + set(optional_args TEST) + set(single_value_args NAME OUTPUT_NAME TYPE) + set(multi_value_args SOURCES DEFINES INCLUDES COMPILE_FLAGS LINK_FLAGS + OBJLIB_DEPS LIB_DEPS PUBLIC_INCLUDES TARGET_PROPERTIES) + + cmake_parse_arguments(lib "${optional_args}" "${single_value_args}" + "${multi_value_args}" ${ARGN}) + + if(DRACO_VERBOSE GREATER 1) + message("--------- draco_add_library ---------\n" + "lib_TEST=${lib_TEST}\n" + "lib_NAME=${lib_NAME}\n" + "lib_OUTPUT_NAME=${lib_OUTPUT_NAME}\n" + "lib_TYPE=${lib_TYPE}\n" + "lib_SOURCES=${lib_SOURCES}\n" + "lib_DEFINES=${lib_DEFINES}\n" + "lib_INCLUDES=${lib_INCLUDES}\n" + "lib_COMPILE_FLAGS=${lib_COMPILE_FLAGS}\n" + "lib_LINK_FLAGS=${lib_LINK_FLAGS}\n" + "lib_OBJLIB_DEPS=${lib_OBJLIB_DEPS}\n" + "lib_LIB_DEPS=${lib_LIB_DEPS}\n" + "lib_PUBLIC_INCLUDES=${lib_PUBLIC_INCLUDES}\n" + "---------------------------------------\n") + endif() + + if(NOT (lib_NAME AND lib_TYPE)) + message(FATAL_ERROR "draco_add_library: NAME and TYPE required.") + endif() + + list(APPEND draco_targets ${lib_NAME}) + if(lib_TEST) + list(APPEND draco_test_targets ${lib_NAME}) + list(APPEND draco_test_sources ${lib_SOURCES}) + else() + list(APPEND draco_sources ${lib_SOURCES}) + if(lib_TYPE STREQUAL MODULE) + list(APPEND draco_module_targets ${lib_NAME}) + elseif(lib_TYPE STREQUAL OBJECT) + list(APPEND draco_objlib_targets ${lib_NAME}) + elseif(lib_TYPE STREQUAL SHARED) + list(APPEND draco_dylib_targets ${lib_NAME}) + elseif(lib_TYPE STREQUAL STATIC) + list(APPEND draco_lib_targets ${lib_NAME}) + else() + message(WARNING "draco_add_library: Unhandled type: ${lib_TYPE}") + endif() + endif() + + add_library(${lib_NAME} ${lib_TYPE} ${lib_SOURCES}) + if(lib_SOURCES) + draco_process_intrinsics_sources(TARGET ${lib_NAME} SOURCES ${lib_SOURCES}) + endif() + + if(lib_OUTPUT_NAME) + if(NOT (BUILD_SHARED_LIBS AND (MSVC OR WIN32))) + set_target_properties(${lib_NAME} + PROPERTIES OUTPUT_NAME ${lib_OUTPUT_NAME}) + endif() + endif() + + if(lib_DEFINES) + target_compile_definitions(${lib_NAME} PRIVATE ${lib_DEFINES}) + endif() + + if(lib_INCLUDES) + target_include_directories(${lib_NAME} PRIVATE ${lib_INCLUDES}) + endif() + + if(lib_PUBLIC_INCLUDES) + target_include_directories(${lib_NAME} PUBLIC ${lib_PUBLIC_INCLUDES}) + endif() + + if(lib_COMPILE_FLAGS OR DRACO_CXX_FLAGS) + target_compile_options(${lib_NAME} + PRIVATE ${lib_COMPILE_FLAGS} ${DRACO_CXX_FLAGS}) + endif() + + if(lib_LINK_FLAGS) + set_target_properties(${lib_NAME} PROPERTIES LINK_FLAGS ${lib_LINK_FLAGS}) + endif() + + if(lib_OBJLIB_DEPS) + foreach(objlib_dep ${lib_OBJLIB_DEPS}) + target_sources(${lib_NAME} PRIVATE $) + endforeach() + endif() + + if(lib_LIB_DEPS) + if(lib_TYPE STREQUAL STATIC) + set(link_type PUBLIC) + else() + set(link_type PRIVATE) + if(lib_TYPE STREQUAL SHARED AND CMAKE_CXX_COMPILER_ID MATCHES "Clang|GNU") + # The draco shared object uses the static draco as input to turn it into + # a shared object. Include everything from the static library in the + # shared object. 
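+        # With GNU ld this expands to
+        # "-Wl,--whole-archive ${lib_LIB_DEPS} -Wl,--no-whole-archive"; on
+        # Apple platforms -Wl,-force_load is the closest equivalent (handled
+        # below).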
+ if(APPLE) + list(INSERT lib_LIB_DEPS 0 -Wl,-force_load) + else() + list(INSERT lib_LIB_DEPS 0 -Wl,--whole-archive) + list(APPEND lib_LIB_DEPS -Wl,--no-whole-archive) + endif() + endif() + endif() + target_link_libraries(${lib_NAME} ${link_type} ${lib_LIB_DEPS}) + endif() + + if(NOT MSVC AND lib_NAME MATCHES "^lib") + # Non-MSVC generators prepend lib to static lib target file names. Libdraco + # already includes lib in its name. Avoid naming output files liblib*. + set_target_properties(${lib_NAME} PROPERTIES PREFIX "") + endif() + + if(lib_TYPE STREQUAL SHARED AND NOT MSVC) + set_target_properties(${lib_NAME} PROPERTIES SOVERSION ${DRACO_SOVERSION}) + endif() + + if(BUILD_SHARED_LIBS AND (MSVC OR WIN32)) + if(lib_TYPE STREQUAL SHARED) + target_compile_definitions(${lib_NAME} PRIVATE "DRACO_BUILDING_DLL=1") + else() + target_compile_definitions(${lib_NAME} PRIVATE "DRACO_BUILDING_DLL=0") + endif() + endif() + + # Determine if $lib_NAME is a header only target. + unset(sources_list) + if(lib_SOURCES) + set(sources_list ${lib_SOURCES}) + list(FILTER sources_list INCLUDE REGEX cc$) + endif() + + if(NOT sources_list) + if(NOT XCODE) + # This is a header only target. Tell CMake the link language. + set_target_properties(${lib_NAME} PROPERTIES LINKER_LANGUAGE CXX) + else() + # The Xcode generator ignores LINKER_LANGUAGE. Add a dummy cc file. + draco_create_dummy_source_file(TARGET ${lib_NAME} BASENAME ${lib_NAME}) + endif() + endif() +endmacro() diff --git a/contrib/draco/cmake/draco_test_config.h.cmake b/contrib/draco/cmake/draco_test_config.h.cmake new file mode 100644 index 000000000..77a574123 --- /dev/null +++ b/contrib/draco/cmake/draco_test_config.h.cmake @@ -0,0 +1,13 @@ +#ifndef DRACO_TESTING_DRACO_TEST_CONFIG_H_ +#define DRACO_TESTING_DRACO_TEST_CONFIG_H_ + +// If this file is named draco_test_config.h.cmake: +// This file is used as input at cmake generation time. + +// If this file is named draco_test_config.h: +// GENERATED FILE, DO NOT EDIT. SEE ABOVE. + +#define DRACO_TEST_DATA_DIR "${DRACO_TEST_DATA_DIR}" +#define DRACO_TEST_TEMP_DIR "${DRACO_TEST_TEMP_DIR}" + +#endif // DRACO_TESTING_DRACO_TEST_CONFIG_H_ diff --git a/contrib/draco/cmake/draco_tests.cmake b/contrib/draco/cmake/draco_tests.cmake new file mode 100644 index 000000000..a6dfc5b57 --- /dev/null +++ b/contrib/draco/cmake/draco_tests.cmake @@ -0,0 +1,133 @@ +if(DRACO_CMAKE_DRACO_TESTS_CMAKE) + return() +endif() +set(DRACO_CMAKE_DRACO_TESTS_CMAKE 1) + +# The factory tests are in a separate target to avoid breaking tests that rely +# on file I/O via the factories. The fake reader and writer implementations +# interfere with normal file I/O function. 
+set(draco_factory_test_sources + "${draco_src_root}/io/file_reader_factory_test.cc" + "${draco_src_root}/io/file_writer_factory_test.cc") + +list( + APPEND + draco_test_sources + "${draco_src_root}/animation/keyframe_animation_encoding_test.cc" + "${draco_src_root}/animation/keyframe_animation_test.cc" + "${draco_src_root}/attributes/point_attribute_test.cc" + "${draco_src_root}/compression/attributes/point_d_vector_test.cc" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_test.cc" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_test.cc" + "${draco_src_root}/compression/attributes/sequential_integer_attribute_encoding_test.cc" + "${draco_src_root}/compression/bit_coders/rans_coding_test.cc" + "${draco_src_root}/compression/decode_test.cc" + "${draco_src_root}/compression/encode_test.cc" + "${draco_src_root}/compression/entropy/shannon_entropy_test.cc" + "${draco_src_root}/compression/entropy/symbol_coding_test.cc" + "${draco_src_root}/compression/mesh/mesh_edgebreaker_encoding_test.cc" + "${draco_src_root}/compression/mesh/mesh_encoder_test.cc" + "${draco_src_root}/compression/point_cloud/point_cloud_kd_tree_encoding_test.cc" + "${draco_src_root}/compression/point_cloud/point_cloud_sequential_encoding_test.cc" + "${draco_src_root}/core/buffer_bit_coding_test.cc" + "${draco_src_root}/core/draco_test_base.h" + "${draco_src_root}/core/draco_test_utils.cc" + "${draco_src_root}/core/draco_test_utils.h" + "${draco_src_root}/core/math_utils_test.cc" + "${draco_src_root}/core/quantization_utils_test.cc" + "${draco_src_root}/core/status_test.cc" + "${draco_src_root}/core/vector_d_test.cc" + "${draco_src_root}/io/file_reader_test_common.h" + "${draco_src_root}/io/file_utils_test.cc" + "${draco_src_root}/io/stdio_file_reader_test.cc" + "${draco_src_root}/io/stdio_file_writer_test.cc" + "${draco_src_root}/io/obj_decoder_test.cc" + "${draco_src_root}/io/obj_encoder_test.cc" + "${draco_src_root}/io/ply_decoder_test.cc" + "${draco_src_root}/io/ply_reader_test.cc" + "${draco_src_root}/io/point_cloud_io_test.cc" + "${draco_src_root}/mesh/mesh_are_equivalent_test.cc" + "${draco_src_root}/mesh/mesh_cleanup_test.cc" + "${draco_src_root}/mesh/triangle_soup_mesh_builder_test.cc" + "${draco_src_root}/metadata/metadata_encoder_test.cc" + "${draco_src_root}/metadata/metadata_test.cc" + "${draco_src_root}/point_cloud/point_cloud_builder_test.cc" + "${draco_src_root}/point_cloud/point_cloud_test.cc") + +list(APPEND draco_gtest_all + "${draco_root}/../googletest/googletest/src/gtest-all.cc") +list(APPEND draco_gtest_main + "${draco_root}/../googletest/googletest/src/gtest_main.cc") + +macro(draco_setup_test_targets) + if(DRACO_TESTS) + if(NOT (EXISTS ${draco_gtest_all} AND EXISTS ${draco_gtest_main})) + message(FATAL "googletest must be a sibling directory of ${draco_root}.") + endif() + + list(APPEND draco_test_defines GTEST_HAS_PTHREAD=0) + + draco_add_library(TEST + NAME + draco_gtest + TYPE + STATIC + SOURCES + ${draco_gtest_all} + DEFINES + ${draco_defines} + ${draco_test_defines} + INCLUDES + ${draco_test_include_paths}) + + draco_add_library(TEST + NAME + draco_gtest_main + TYPE + STATIC + SOURCES + ${draco_gtest_main} + DEFINES + ${draco_defines} + ${draco_test_defines} + INCLUDES + ${draco_test_include_paths}) + + set(DRACO_TEST_DATA_DIR "${draco_root}/testdata") + set(DRACO_TEST_TEMP_DIR "${draco_build}/draco_test_temp") + file(MAKE_DIRECTORY "${DRACO_TEST_TEMP_DIR}") + + # 
Sets DRACO_TEST_DATA_DIR and DRACO_TEST_TEMP_DIR. + configure_file("${draco_root}/cmake/draco_test_config.h.cmake" + "${draco_build}/testing/draco_test_config.h") + + # Create the test targets. + draco_add_executable(NAME + draco_tests + SOURCES + ${draco_test_sources} + DEFINES + ${draco_defines} + ${draco_test_defines} + INCLUDES + ${draco_test_include_paths} + LIB_DEPS + draco_static + draco_gtest + draco_gtest_main) + + draco_add_executable(NAME + draco_factory_tests + SOURCES + ${draco_factory_test_sources} + DEFINES + ${draco_defines} + ${draco_test_defines} + INCLUDES + ${draco_test_include_paths} + LIB_DEPS + draco_static + draco_gtest + draco_gtest_main) + endif() +endmacro() diff --git a/contrib/draco/cmake/draco_variables.cmake b/contrib/draco/cmake/draco_variables.cmake new file mode 100644 index 000000000..8dbc77a53 --- /dev/null +++ b/contrib/draco/cmake/draco_variables.cmake @@ -0,0 +1,64 @@ +if(DRACO_CMAKE_DRACO_VARIABLES_CMAKE_) + return() +endif() # DRACO_CMAKE_DRACO_VARIABLES_CMAKE_ +set(DRACO_CMAKE_DRACO_VARIABLES_CMAKE_ 1) + +# Halts generation when $variable_name does not refer to a directory that +# exists. +macro(draco_variable_must_be_directory variable_name) + if("${variable_name}" STREQUAL "") + message( + FATAL_ERROR + "Empty variable_name passed to draco_variable_must_be_directory.") + endif() + + if("${${variable_name}}" STREQUAL "") + message( + FATAL_ERROR + "Empty variable ${variable_name} is required to build draco.") + endif() + + if(NOT IS_DIRECTORY "${${variable_name}}") + message( + FATAL_ERROR + "${variable_name}, which is ${${variable_name}}, does not refer to a\n" + "directory.") + endif() +endmacro() + +# Adds $var_name to the tracked variables list. +macro(draco_track_configuration_variable var_name) + if(DRACO_VERBOSE GREATER 2) + message("---- draco_track_configuration_variable ----\n" + "var_name=${var_name}\n" + "----------------------------------------------\n") + endif() + + list(APPEND draco_configuration_variables ${var_name}) + list(REMOVE_DUPLICATES draco_configuration_variables) +endmacro() + +# Logs current C++ and executable linker flags via the CMake message command. +macro(draco_dump_cmake_flag_variables) + unset(flag_variables) + list(APPEND flag_variables "CMAKE_CXX_FLAGS_INIT" "CMAKE_CXX_FLAGS" + "CMAKE_EXE_LINKER_FLAGS_INIT" "CMAKE_EXE_LINKER_FLAGS") + if(CMAKE_BUILD_TYPE) + list(APPEND flag_variables "CMAKE_BUILD_TYPE" + "CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE}_INIT" + "CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE}" + "CMAKE_EXE_LINKER_FLAGS_${CMAKE_BUILD_TYPE}_INIT" + "CMAKE_EXE_LINKER_FLAGS_${CMAKE_BUILD_TYPE}") + endif() + foreach(flag_variable ${flag_variables}) + message("${flag_variable}:${${flag_variable}}") + endforeach() +endmacro() + +# Dumps the variables tracked in $draco_configuration_variables via the CMake +# message command. +macro(draco_dump_tracked_configuration_variables) + foreach(config_variable ${draco_configuration_variables}) + message("${config_variable}:${${config_variable}}") + endforeach() +endmacro() diff --git a/contrib/draco/cmake/sanitizers.cmake b/contrib/draco/cmake/sanitizers.cmake new file mode 100644 index 000000000..e720bc045 --- /dev/null +++ b/contrib/draco/cmake/sanitizers.cmake @@ -0,0 +1,19 @@ +if(DRACO_CMAKE_SANITIZERS_CMAKE_) + return() +endif() +set(DRACO_CMAKE_SANITIZERS_CMAKE_ 1) + +if(MSVC OR NOT SANITIZE) + return() +endif() + +include("${draco_root}/cmake/compiler_flags.cmake") + +string(TOLOWER ${SANITIZE} SANITIZE) + +# Require the sanitizer requested. 
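+# For example, configuring with SANITIZE=address makes "-fsanitize=address"
+# mandatory for both the compiler and linker checks below (illustrative).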
+require_linker_flag("-fsanitize=${SANITIZE}") +require_compiler_flag("-fsanitize=${SANITIZE}" YES) + +# Make callstacks accurate. +require_compiler_flag("-fno-omit-frame-pointer -fno-optimize-sibling-calls" YES) diff --git a/contrib/draco/cmake/toolchains/aarch64-linux-gnu.cmake b/contrib/draco/cmake/toolchains/aarch64-linux-gnu.cmake new file mode 100644 index 000000000..87e0b4a45 --- /dev/null +++ b/contrib/draco/cmake/toolchains/aarch64-linux-gnu.cmake @@ -0,0 +1,14 @@ +if(DRACO_CMAKE_TOOLCHAINS_AARCH64_LINUX_GNU_CMAKE_) + return() +endif() # DRACO_CMAKE_TOOLCHAINS_AARCH64_LINUX_GNU_CMAKE_ +set(DRACO_CMAKE_TOOLCHAINS_AARCH64_LINUX_GNU_CMAKE_ 1) + +set(CMAKE_SYSTEM_NAME "Linux") + +if("${CROSS}" STREQUAL "") + set(CROSS aarch64-linux-gnu-) +endif() + +set(CMAKE_CXX_COMPILER ${CROSS}g++) +set(CMAKE_CXX_FLAGS_INIT "-march=armv8-a") +set(CMAKE_SYSTEM_PROCESSOR "aarch64") diff --git a/contrib/draco/cmake/toolchains/android-ndk-common.cmake b/contrib/draco/cmake/toolchains/android-ndk-common.cmake new file mode 100644 index 000000000..5126d6e29 --- /dev/null +++ b/contrib/draco/cmake/toolchains/android-ndk-common.cmake @@ -0,0 +1,23 @@ +if(DRACO_CMAKE_TOOLCHAINS_ANDROID_NDK_COMMON_CMAKE_) + return() +endif() +set(DRACO_CMAKE_TOOLCHAINS_ANDROID_NDK_COMMON_CMAKE_ 1) + +# Toolchain files do not have access to cached variables: +# https://gitlab.kitware.com/cmake/cmake/issues/16170. Set an intermediate +# environment variable when loaded the first time. +if(DRACO_ANDROID_NDK_PATH) + set(ENV{DRACO_ANDROID_NDK_PATH} "${DRACO_ANDROID_NDK_PATH}") +else() + set(DRACO_ANDROID_NDK_PATH "$ENV{DRACO_ANDROID_NDK_PATH}") +endif() + +set(CMAKE_SYSTEM_NAME Android) + +if(NOT CMAKE_ANDROID_STL_TYPE) + set(CMAKE_ANDROID_STL_TYPE c++_static) +endif() + +if(NOT CMAKE_ANDROID_NDK_TOOLCHAIN_VERSION) + set(CMAKE_ANDROID_NDK_TOOLCHAIN_VERSION clang) +endif() diff --git a/contrib/draco/cmake/toolchains/android.cmake b/contrib/draco/cmake/toolchains/android.cmake new file mode 100644 index 000000000..b8f576d5e --- /dev/null +++ b/contrib/draco/cmake/toolchains/android.cmake @@ -0,0 +1,39 @@ +if(DRACO_CMAKE_TOOLCHAINS_ANDROID_CMAKE_) + return() +endif() # DRACO_CMAKE_TOOLCHAINS_ANDROID_CMAKE_ + +# Additional ANDROID_* settings are available, see: +# https://developer.android.com/ndk/guides/cmake#variables + +if(NOT ANDROID_PLATFORM) + set(ANDROID_PLATFORM android-21) +endif() + +# Choose target architecture with: +# +# -DANDROID_ABI={armeabi-v7a,armeabi-v7a with NEON,arm64-v8a,x86,x86_64} +if(NOT ANDROID_ABI) + set(ANDROID_ABI arm64-v8a) +endif() + +# Force arm mode for 32-bit targets (instead of the default thumb) to improve +# performance. +if(NOT ANDROID_ARM_MODE) + set(ANDROID_ARM_MODE arm) +endif() + +# Toolchain files do not have access to cached variables: +# https://gitlab.kitware.com/cmake/cmake/issues/16170. Set an intermediate +# environment variable when loaded the first time. 
+if(DRACO_ANDROID_NDK_PATH) + set(ENV{DRACO_ANDROID_NDK_PATH} "${DRACO_ANDROID_NDK_PATH}") +else() + set(DRACO_ANDROID_NDK_PATH "$ENV{DRACO_ANDROID_NDK_PATH}") +endif() + +if(NOT DRACO_ANDROID_NDK_PATH) + message(FATAL_ERROR "DRACO_ANDROID_NDK_PATH not set.") + return() +endif() + +include("${DRACO_ANDROID_NDK_PATH}/build/cmake/android.toolchain.cmake") diff --git a/contrib/draco/cmake/toolchains/arm-ios-common.cmake b/contrib/draco/cmake/toolchains/arm-ios-common.cmake new file mode 100644 index 000000000..65326d1c2 --- /dev/null +++ b/contrib/draco/cmake/toolchains/arm-ios-common.cmake @@ -0,0 +1,17 @@ +if(DRACO_CMAKE_TOOLCHAINS_ARM_IOS_COMMON_CMAKE_) + return() +endif() +set(DRACO_CMAKE_ARM_IOS_COMMON_CMAKE_ 1) + +set(CMAKE_SYSTEM_NAME "Darwin") +if(CMAKE_OSX_SDK) + set(CMAKE_OSX_SYSROOT ${CMAKE_OSX_SDK}) +else() + set(CMAKE_OSX_SYSROOT iphoneos) +endif() +set(CMAKE_C_COMPILER clang) +set(CMAKE_C_COMPILER_ARG1 "-arch ${CMAKE_SYSTEM_PROCESSOR}") +set(CMAKE_CXX_COMPILER clang++) +set(CMAKE_CXX_COMPILER_ARG1 "-arch ${CMAKE_SYSTEM_PROCESSOR}") + +# TODO(tomfinegan): Handle bit code embedding. diff --git a/contrib/draco/cmake/toolchains/arm-linux-gnueabihf.cmake b/contrib/draco/cmake/toolchains/arm-linux-gnueabihf.cmake new file mode 100644 index 000000000..6e45969e9 --- /dev/null +++ b/contrib/draco/cmake/toolchains/arm-linux-gnueabihf.cmake @@ -0,0 +1,15 @@ +if(DRACO_CMAKE_TOOLCHAINS_ARM_LINUX_GNUEABIHF_CMAKE_) + return() +endif() # DRACO_CMAKE_TOOLCHAINS_ARM_LINUX_GNUEABIHF_CMAKE_ +set(DRACO_CMAKE_TOOLCHAINS_ARM_LINUX_GNUEABIHF_CMAKE_ 1) + +set(CMAKE_SYSTEM_NAME "Linux") + +if("${CROSS}" STREQUAL "") + set(CROSS arm-linux-gnueabihf-) +endif() + +set(CMAKE_CXX_COMPILER ${CROSS}g++) +set(CMAKE_CXX_FLAGS_INIT "-march=armv7-a -marm") +set(CMAKE_SYSTEM_PROCESSOR "armv7") +set(DRACO_NEON_INTRINSICS_FLAG "-mfpu=neon") diff --git a/contrib/draco/cmake/toolchains/arm64-android-ndk-libcpp.cmake b/contrib/draco/cmake/toolchains/arm64-android-ndk-libcpp.cmake new file mode 100644 index 000000000..4b6d366f0 --- /dev/null +++ b/contrib/draco/cmake/toolchains/arm64-android-ndk-libcpp.cmake @@ -0,0 +1,16 @@ +if(DRACO_CMAKE_TOOLCHAINS_ARM64_ANDROID_NDK_LIBCPP_CMAKE_) + return() +endif() +set(DRACO_CMAKE_TOOLCHAINS_ARM64_ANDROID_NDK_LIBCPP_CMAKE_ 1) + +include("${CMAKE_CURRENT_LIST_DIR}/android-ndk-common.cmake") + +if(NOT ANDROID_PLATFORM) + set(ANROID_PLATFORM android-21) +endif() + +if(NOT ANDROID_ABI) + set(ANDROID_ABI arm64-v8a) +endif() + +include("${DRACO_ANDROID_NDK_PATH}/build/cmake/android.toolchain.cmake") diff --git a/contrib/draco/cmake/toolchains/arm64-ios.cmake b/contrib/draco/cmake/toolchains/arm64-ios.cmake new file mode 100644 index 000000000..c4ec7e3fa --- /dev/null +++ b/contrib/draco/cmake/toolchains/arm64-ios.cmake @@ -0,0 +1,14 @@ +if(DRACO_CMAKE_TOOLCHAINS_ARM64_IOS_CMAKE_) + return() +endif() +set(DRACO_CMAKE_TOOLCHAINS_ARM64_IOS_CMAKE_ 1) + +if(XCODE) + # TODO(tomfinegan): Handle arm builds in Xcode. 
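Similarly, a hedged example of driving the Android toolchain above; the NDK location is an assumption and the remaining values echo the defaults set in this file.

# Example arm64 Android configure command (NDK path is an assumption):
#   cmake path/to/draco \
#     -DCMAKE_TOOLCHAIN_FILE=cmake/toolchains/android.cmake \
#     -DDRACO_ANDROID_NDK_PATH=/opt/android-ndk \
#     -DANDROID_ABI=arm64-v8a -DANDROID_PLATFORM=android-21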
+ message(FATAL_ERROR "This toolchain does not support Xcode.") +endif() + +set(CMAKE_SYSTEM_PROCESSOR "arm64") +set(CMAKE_OSX_ARCHITECTURES "arm64") + +include("${CMAKE_CURRENT_LIST_DIR}/arm-ios-common.cmake") diff --git a/contrib/draco/cmake/toolchains/arm64-linux-gcc.cmake b/contrib/draco/cmake/toolchains/arm64-linux-gcc.cmake new file mode 100644 index 000000000..046ff0139 --- /dev/null +++ b/contrib/draco/cmake/toolchains/arm64-linux-gcc.cmake @@ -0,0 +1,18 @@ +if(DRACO_CMAKE_TOOLCHAINS_ARM64_LINUX_GCC_CMAKE_) + return() +endif() +set(DRACO_CMAKE_TOOLCHAINS_ARM64_LINUX_GCC_CMAKE_ 1) + +set(CMAKE_SYSTEM_NAME "Linux") + +if("${CROSS}" STREQUAL "") + # Default the cross compiler prefix to something known to work. + set(CROSS aarch64-linux-gnu-) +endif() + +set(CMAKE_C_COMPILER ${CROSS}gcc) +set(CMAKE_CXX_COMPILER ${CROSS}g++) +set(AS_EXECUTABLE ${CROSS}as) +set(CMAKE_C_COMPILER_ARG1 "-march=armv8-a") +set(CMAKE_CXX_COMPILER_ARG1 "-march=armv8-a") +set(CMAKE_SYSTEM_PROCESSOR "arm64") diff --git a/contrib/draco/cmake/toolchains/armv7-android-ndk-libcpp.cmake b/contrib/draco/cmake/toolchains/armv7-android-ndk-libcpp.cmake new file mode 100644 index 000000000..80ee98b18 --- /dev/null +++ b/contrib/draco/cmake/toolchains/armv7-android-ndk-libcpp.cmake @@ -0,0 +1,16 @@ +if(DRACO_CMAKE_TOOLCHAINS_ARMV7_ANDROID_NDK_LIBCPP_CMAKE_) + return() +endif() +set(DRACO_CMAKE_TOOLCHAINS_ARMV7_ANDROID_NDK_LIBCPP_CMAKE_ 1) + +include("${CMAKE_CURRENT_LIST_DIR}/android-ndk-common.cmake") + +if(NOT ANDROID_PLATFORM) + set(ANDROID_PLATFORM android-18) +endif() + +if(NOT ANDROID_ABI) + set(ANDROID_ABI armeabi-v7a) +endif() + +include("${DRACO_ANDROID_NDK_PATH}/build/cmake/android.toolchain.cmake") diff --git a/contrib/draco/cmake/toolchains/armv7-ios.cmake b/contrib/draco/cmake/toolchains/armv7-ios.cmake new file mode 100644 index 000000000..8ddd6997b --- /dev/null +++ b/contrib/draco/cmake/toolchains/armv7-ios.cmake @@ -0,0 +1,14 @@ +if(DRACO_CMAKE_TOOLCHAINS_ARMV7_IOS_CMAKE_) + return() +endif() +set(DRACO_CMAKE_TOOLCHAINS_ARMV7_IOS_CMAKE_ 1) + +if(XCODE) + # TODO(tomfinegan): Handle arm builds in Xcode. + message(FATAL_ERROR "This toolchain does not support Xcode.") +endif() + +set(CMAKE_SYSTEM_PROCESSOR "armv7") +set(CMAKE_OSX_ARCHITECTURES "armv7") + +include("${CMAKE_CURRENT_LIST_DIR}/arm-ios-common.cmake") diff --git a/contrib/draco/cmake/toolchains/armv7-linux-gcc.cmake b/contrib/draco/cmake/toolchains/armv7-linux-gcc.cmake new file mode 100644 index 000000000..9c9472319 --- /dev/null +++ b/contrib/draco/cmake/toolchains/armv7-linux-gcc.cmake @@ -0,0 +1,24 @@ +if(DRACO_CMAKE_TOOLCHAINS_ARMV7_LINUX_GCC_CMAKE_) + return() +endif() +set(DRACO_CMAKE_TOOLCHAINS_ARMV7_LINUX_GCC_CMAKE_ 1) + +set(CMAKE_SYSTEM_NAME "Linux") + +if("${CROSS}" STREQUAL "") + # Default the cross compiler prefix to something known to work. 
+ set(CROSS arm-linux-gnueabihf-) +endif() + +if(NOT ${CROSS} MATCHES hf-$) + set(DRACO_EXTRA_TOOLCHAIN_FLAGS "-mfloat-abi=softfp") +endif() + +set(CMAKE_C_COMPILER ${CROSS}gcc) +set(CMAKE_CXX_COMPILER ${CROSS}g++) +set(AS_EXECUTABLE ${CROSS}as) +set(CMAKE_C_COMPILER_ARG1 + "-march=armv7-a -mfpu=neon ${DRACO_EXTRA_TOOLCHAIN_FLAGS}") +set(CMAKE_CXX_COMPILER_ARG1 + "-march=armv7-a -mfpu=neon ${DRACO_EXTRA_TOOLCHAIN_FLAGS}") +set(CMAKE_SYSTEM_PROCESSOR "armv7") diff --git a/contrib/draco/cmake/toolchains/armv7s-ios.cmake b/contrib/draco/cmake/toolchains/armv7s-ios.cmake new file mode 100644 index 000000000..b433025ba --- /dev/null +++ b/contrib/draco/cmake/toolchains/armv7s-ios.cmake @@ -0,0 +1,14 @@ +if(DRACO_CMAKE_TOOLCHAINS_ARMV7S_IOS_CMAKE_) + return() +endif() +set(DRACO_CMAKE_TOOLCHAINS_ARMV7S_IOS_CMAKE_ 1) + +if(XCODE) + # TODO(tomfinegan): Handle arm builds in Xcode. + message(FATAL_ERROR "This toolchain does not support Xcode.") +endif() + +set(CMAKE_SYSTEM_PROCESSOR "armv7s") +set(CMAKE_OSX_ARCHITECTURES "armv7s") + +include("${CMAKE_CURRENT_LIST_DIR}/arm-ios-common.cmake") diff --git a/contrib/draco/cmake/toolchains/i386-ios.cmake b/contrib/draco/cmake/toolchains/i386-ios.cmake new file mode 100644 index 000000000..e9a105591 --- /dev/null +++ b/contrib/draco/cmake/toolchains/i386-ios.cmake @@ -0,0 +1,15 @@ +if(DRACO_CMAKE_TOOLCHAINS_i386_IOS_CMAKE_) + return() +endif() +set(DRACO_CMAKE_TOOLCHAINS_i386_IOS_CMAKE_ 1) + +if(XCODE) + # TODO(tomfinegan): Handle arm builds in Xcode. + message(FATAL_ERROR "This toolchain does not support Xcode.") +endif() + +set(CMAKE_SYSTEM_PROCESSOR "i386") +set(CMAKE_OSX_ARCHITECTURES "i386") +set(CMAKE_OSX_SDK "iphonesimulator") + +include("${CMAKE_CURRENT_LIST_DIR}/arm-ios-common.cmake") diff --git a/contrib/draco/cmake/toolchains/x86-android-ndk-libcpp.cmake b/contrib/draco/cmake/toolchains/x86-android-ndk-libcpp.cmake new file mode 100644 index 000000000..d43383640 --- /dev/null +++ b/contrib/draco/cmake/toolchains/x86-android-ndk-libcpp.cmake @@ -0,0 +1,16 @@ +if(DRACO_CMAKE_TOOLCHAINS_X86_ANDROID_NDK_LIBCPP_CMAKE_) + return() +endif() +set(DRACO_CMAKE_TOOLCHAINS_X86_ANDROID_NDK_LIBCPP_CMAKE_ 1) + +include("${CMAKE_CURRENT_LIST_DIR}/android-ndk-common.cmake") + +if(NOT ANDROID_PLATFORM) + set(ANDROID_PLATFORM android-18) +endif() + +if(NOT ANDROID_ABI) + set(ANDROID_ABI x86) +endif() + +include("${DRACO_ANDROID_NDK_PATH}/build/cmake/android.toolchain.cmake") diff --git a/contrib/draco/cmake/toolchains/x86_64-android-ndk-libcpp.cmake b/contrib/draco/cmake/toolchains/x86_64-android-ndk-libcpp.cmake new file mode 100644 index 000000000..d6fabeacc --- /dev/null +++ b/contrib/draco/cmake/toolchains/x86_64-android-ndk-libcpp.cmake @@ -0,0 +1,16 @@ +if(DRACO_CMAKE_TOOLCHAINS_X86_64_ANDROID_NDK_LIBCPP_CMAKE_) + return() +endif() +set(DRACO_CMAKE_TOOLCHAINS_X86_64_ANDROID_NDK_LIBCPP_CMAKE_ 1) + +include("${CMAKE_CURRENT_LIST_DIR}/android-ndk-common.cmake") + +if(NOT ANDROID_PLATFORM) + set(ANDROID_PLATFORM android-21) +endif() + +if(NOT ANDROID_ABI) + set(ANDROID_ABI x86_64) +endif() + +include("${DRACO_ANDROID_NDK_PATH}/build/cmake/android.toolchain.cmake") diff --git a/contrib/draco/cmake/toolchains/x86_64-ios.cmake b/contrib/draco/cmake/toolchains/x86_64-ios.cmake new file mode 100644 index 000000000..4c50a72a2 --- /dev/null +++ b/contrib/draco/cmake/toolchains/x86_64-ios.cmake @@ -0,0 +1,15 @@ +if(DRACO_CMAKE_TOOLCHAINS_X86_64_IOS_CMAKE_) + return() +endif() +set(DRACO_CMAKE_TOOLCHAINS_X86_64_IOS_CMAKE_ 1) + +if(XCODE) + # TODO(tomfinegan): Handle arm 
builds in Xcode. + message(FATAL_ERROR "This toolchain does not support Xcode.") +endif() + +set(CMAKE_SYSTEM_PROCESSOR "x86_64") +set(CMAKE_OSX_ARCHITECTURES "x86_64") +set(CMAKE_OSX_SDK "iphonesimulator") + +include("${CMAKE_CURRENT_LIST_DIR}/arm-ios-common.cmake") diff --git a/contrib/draco/cmake/util.cmake b/contrib/draco/cmake/util.cmake new file mode 100644 index 000000000..813146a62 --- /dev/null +++ b/contrib/draco/cmake/util.cmake @@ -0,0 +1,79 @@ +if(DRACO_CMAKE_UTIL_CMAKE_) + return() +endif() +set(DRACO_CMAKE_UTIL_CMAKE_ 1) + +# Creates dummy source file in $draco_build_dir named $basename.$extension and +# returns the full path to the dummy source file via the $out_file_path +# parameter. +function(create_dummy_source_file basename extension out_file_path) + set(dummy_source_file "${draco_build_dir}/${basename}.${extension}") + file(WRITE "${dummy_source_file}.new" + "// Generated file. DO NOT EDIT!\n" + "// ${target_name} needs a ${extension} file to force link language, \n" + "// or to silence a harmless CMake warning: Ignore me.\n" + "void ${target_name}_dummy_function(void) {}\n") + + # Will replace ${dummy_source_file} only if the file content has changed. + # This prevents forced Draco rebuilds after CMake runs. + configure_file("${dummy_source_file}.new" "${dummy_source_file}") + file(REMOVE "${dummy_source_file}.new") + + set(${out_file_path} ${dummy_source_file} PARENT_SCOPE) +endfunction() + +# Convenience function for adding a dummy source file to $target_name using +# $extension as the file extension. Wraps create_dummy_source_file(). +function(add_dummy_source_file_to_target target_name extension) + create_dummy_source_file("${target_name}" "${extension}" "dummy_source_file") + target_sources(${target_name} PRIVATE ${dummy_source_file}) +endfunction() + +# Extracts the version number from $version_file and returns it to the user via +# $version_string_out_var. This is achieved by finding the first instance of the +# kDracoVersion variable and then removing everything but the string literal +# assigned to the variable. Quotes and semicolon are stripped from the returned +# string. +function(extract_version_string version_file version_string_out_var) + file(STRINGS "${version_file}" draco_version REGEX "kDracoVersion") + list(GET draco_version 0 draco_version) + string(REPLACE "static const char kDracoVersion[] = " "" draco_version + "${draco_version}") + string(REPLACE ";" "" draco_version "${draco_version}") + string(REPLACE "\"" "" draco_version "${draco_version}") + set("${version_string_out_var}" "${draco_version}" PARENT_SCOPE) +endfunction() + +# Sets CMake compiler launcher to $launcher_name when $launcher_name is found in +# $PATH. Warns user about ignoring build flag $launcher_flag when $launcher_name +# is not found in $PATH. +function(set_compiler_launcher launcher_flag launcher_name) + find_program(launcher_path "${launcher_name}") + if(launcher_path) + set(CMAKE_C_COMPILER_LAUNCHER "${launcher_path}" PARENT_SCOPE) + set(CMAKE_CXX_COMPILER_LAUNCHER "${launcher_path}" PARENT_SCOPE) + message("--- Using ${launcher_name} as compiler launcher.") + else() + message( + WARNING "--- Cannot find ${launcher_name}, ${launcher_flag} ignored.") + endif() +endfunction() + +# Terminates CMake execution when $var_name is unset in the environment. Sets +# CMake variable to the value of the environment variable when the variable is +# present in the environment. 
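Before the remaining helpers below, a short usage sketch for the utilities defined above; this is illustrative only, and both the version-file path and the DRACO_CCACHE option name are assumptions.

extract_version_string("${draco_root}/src/draco/core/draco_version.h" draco_version)  # assumed location of kDracoVersion
message("Configuring Draco ${draco_version}")
if(DRACO_CCACHE)                              # hypothetical user-facing option
  set_compiler_launcher(DRACO_CCACHE ccache)  # warns and continues when ccache is not found
endif()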
+macro(require_variable var_name) + if("$ENV{${var_name}}" STREQUAL "") + message(FATAL_ERROR "${var_name} must be set in environment.") + endif() + set_variable_if_unset(${var_name} "") +endmacro() + +# Sets $var_name to $default_value if not already set. +macro(set_variable_if_unset var_name default_value) + if(NOT "$ENV{${var_name}}" STREQUAL "") + set(${var_name} $ENV{${var_name}}) + elseif(NOT ${var_name}) + set(${var_name} ${default_value}) + endif() +endmacro() diff --git a/contrib/draco/src/draco/animation/keyframe_animation.cc b/contrib/draco/src/draco/animation/keyframe_animation.cc new file mode 100644 index 000000000..eaf94a330 --- /dev/null +++ b/contrib/draco/src/draco/animation/keyframe_animation.cc @@ -0,0 +1,54 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/animation/keyframe_animation.h" + +namespace draco { + +KeyframeAnimation::KeyframeAnimation() {} + +bool KeyframeAnimation::SetTimestamps( + const std::vector ×tamp) { + // Already added attributes. + const int32_t num_frames = timestamp.size(); + if (num_attributes() > 0) { + // Timestamp attribute could be added only once. + if (timestamps()->size()) { + return false; + } else { + // Check if the number of frames is consistent with + // the existing keyframes. + if (num_frames != num_points()) { + return false; + } + } + } else { + // This is the first attribute. + set_num_frames(num_frames); + } + + // Add attribute for time stamp data. + std::unique_ptr timestamp_att = + std::unique_ptr(new PointAttribute()); + timestamp_att->Init(GeometryAttribute::GENERIC, 1, DT_FLOAT32, false, + num_frames); + for (PointIndex i(0); i < num_frames; ++i) { + timestamp_att->SetAttributeValue(timestamp_att->mapped_index(i), + ×tamp[i.value()]); + } + this->SetAttribute(kTimestampId, std::move(timestamp_att)); + return true; +} + +} // namespace draco diff --git a/contrib/draco/src/draco/animation/keyframe_animation.h b/contrib/draco/src/draco/animation/keyframe_animation.h new file mode 100644 index 000000000..a7afb2b81 --- /dev/null +++ b/contrib/draco/src/draco/animation/keyframe_animation.h @@ -0,0 +1,107 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_ANIMATION_KEYFRAME_ANIMATION_H_ +#define DRACO_ANIMATION_KEYFRAME_ANIMATION_H_ + +#include + +#include "draco/point_cloud/point_cloud.h" + +namespace draco { + +// Class for holding keyframe animation data. 
It will have two or more +// attributes as a point cloud. The first attribute is always the timestamp +// of the animation. Each KeyframeAnimation could have multiple animations with +// the same number of frames. Each animation will be treated as a point +// attribute. +class KeyframeAnimation : public PointCloud { + public: + // Force time stamp to be float type. + using TimestampType = float; + + KeyframeAnimation(); + + // Animation must have only one timestamp attribute. + // This function must be called before adding any animation data. + // Returns false if timestamp already exists. + bool SetTimestamps(const std::vector ×tamp); + + // Returns an id for the added animation data. This id will be used to + // identify this animation. + // Returns -1 if error, e.g. number of frames is not consistent. + // Type |T| should be consistent with |DataType|, e.g: + // float - DT_FLOAT32, + // int32_t - DT_INT32, ... + template + int32_t AddKeyframes(DataType data_type, uint32_t num_components, + const std::vector &data); + + const PointAttribute *timestamps() const { + return GetAttributeByUniqueId(kTimestampId); + } + const PointAttribute *keyframes(int32_t animation_id) const { + return GetAttributeByUniqueId(animation_id); + } + + // Number of frames should be equal to number points in the point cloud. + void set_num_frames(int32_t num_frames) { set_num_points(num_frames); } + int32_t num_frames() const { return static_cast(num_points()); } + + int32_t num_animations() const { return num_attributes() - 1; } + + private: + // Attribute id of timestamp is fixed to 0. + static constexpr int32_t kTimestampId = 0; +}; + +template +int32_t KeyframeAnimation::AddKeyframes(DataType data_type, + uint32_t num_components, + const std::vector &data) { + // TODO(draco-eng): Verify T is consistent with |data_type|. + if (num_components == 0) { + return -1; + } + // If timestamps is not added yet, then reserve attribute 0 for timestamps. + if (!num_attributes()) { + // Add a temporary attribute with 0 points to fill attribute id 0. + std::unique_ptr temp_att = + std::unique_ptr(new PointAttribute()); + temp_att->Init(GeometryAttribute::GENERIC, num_components, data_type, false, + 0); + this->AddAttribute(std::move(temp_att)); + + set_num_frames(data.size() / num_components); + } + + if (data.size() != num_components * num_frames()) { + return -1; + } + + std::unique_ptr keyframe_att = + std::unique_ptr(new PointAttribute()); + keyframe_att->Init(GeometryAttribute::GENERIC, num_components, data_type, + false, num_frames()); + const size_t stride = num_components; + for (PointIndex i(0); i < num_frames(); ++i) { + keyframe_att->SetAttributeValue(keyframe_att->mapped_index(i), + &data[i.value() * stride]); + } + return this->AddAttribute(std::move(keyframe_att)); +} + +} // namespace draco + +#endif // DRACO_ANIMATION_KEYFRAME_ANIMATION_H_ diff --git a/contrib/draco/src/draco/animation/keyframe_animation_decoder.cc b/contrib/draco/src/draco/animation/keyframe_animation_decoder.cc new file mode 100644 index 000000000..20659468d --- /dev/null +++ b/contrib/draco/src/draco/animation/keyframe_animation_decoder.cc @@ -0,0 +1,30 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
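As a reader aid, here is a minimal sketch, not part of the Draco sources, of driving the KeyframeAnimation API declared above; the frame values are invented.

#include <cstdint>
#include <vector>

#include "draco/animation/keyframe_animation.h"

// Fills |animation| with two frames and one 3-component channel.
// Returns the channel's attribute id, or -1 on failure.
int32_t BuildToyAnimation(draco::KeyframeAnimation *animation) {
  // Two frames sampled at t = 0 and t = 1.
  const std::vector<draco::KeyframeAnimation::TimestampType> timestamps = {0.f,
                                                                           1.f};
  if (!animation->SetTimestamps(timestamps)) {
    return -1;  // Timestamps may only be set once.
  }
  // Six floats = 2 frames x 3 components (for example one translation per frame).
  const std::vector<float> channel = {0.f, 0.f, 0.f, 1.f, 2.f, 3.f};
  return animation->AddKeyframes(draco::DT_FLOAT32, 3, channel);
}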
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/animation/keyframe_animation_decoder.h" + +namespace draco { + +Status KeyframeAnimationDecoder::Decode(const DecoderOptions &options, + DecoderBuffer *in_buffer, + KeyframeAnimation *animation) { + const auto status = PointCloudSequentialDecoder::Decode( + options, in_buffer, static_cast(animation)); + if (!status.ok()) { + return status; + } + return OkStatus(); +} + +} // namespace draco diff --git a/contrib/draco/src/draco/animation/keyframe_animation_decoder.h b/contrib/draco/src/draco/animation/keyframe_animation_decoder.h new file mode 100644 index 000000000..fdf086b3a --- /dev/null +++ b/contrib/draco/src/draco/animation/keyframe_animation_decoder.h @@ -0,0 +1,34 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_ANIMATION_KEYFRAME_ANIMATION_DECODER_H_ +#define DRACO_ANIMATION_KEYFRAME_ANIMATION_DECODER_H_ + +#include "draco/animation/keyframe_animation.h" +#include "draco/compression/point_cloud/point_cloud_sequential_decoder.h" + +namespace draco { + +// Class for decoding keyframe animation. +class KeyframeAnimationDecoder : private PointCloudSequentialDecoder { + public: + KeyframeAnimationDecoder(){}; + + Status Decode(const DecoderOptions &options, DecoderBuffer *in_buffer, + KeyframeAnimation *animation); +}; + +} // namespace draco + +#endif // DRACO_ANIMATION_KEYFRAME_ANIMATION_DECODER_H_ diff --git a/contrib/draco/src/draco/animation/keyframe_animation_encoder.cc b/contrib/draco/src/draco/animation/keyframe_animation_encoder.cc new file mode 100644 index 000000000..f7d84f310 --- /dev/null +++ b/contrib/draco/src/draco/animation/keyframe_animation_encoder.cc @@ -0,0 +1,28 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +#include "draco/animation/keyframe_animation_encoder.h" + +namespace draco { + +KeyframeAnimationEncoder::KeyframeAnimationEncoder() {} + +Status KeyframeAnimationEncoder::EncodeKeyframeAnimation( + const KeyframeAnimation &animation, const EncoderOptions &options, + EncoderBuffer *out_buffer) { + SetPointCloud(animation); + return Encode(options, out_buffer); +} + +} // namespace draco diff --git a/contrib/draco/src/draco/animation/keyframe_animation_encoder.h b/contrib/draco/src/draco/animation/keyframe_animation_encoder.h new file mode 100644 index 000000000..6096c79fa --- /dev/null +++ b/contrib/draco/src/draco/animation/keyframe_animation_encoder.h @@ -0,0 +1,39 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_ANIMATION_KEYFRAME_ANIMATION_ENCODER_H_ +#define DRACO_ANIMATION_KEYFRAME_ANIMATION_ENCODER_H_ + +#include "draco/animation/keyframe_animation.h" +#include "draco/compression/point_cloud/point_cloud_sequential_encoder.h" + +namespace draco { + +// Class for encoding keyframe animation. It takes KeyframeAnimation as a +// PointCloud and compress it. It's mostly a wrapper around PointCloudEncoder so +// that the animation module could be separated from geometry compression when +// exposed to developers. +class KeyframeAnimationEncoder : private PointCloudSequentialEncoder { + public: + KeyframeAnimationEncoder(); + + // Encode an animation to a buffer. + Status EncodeKeyframeAnimation(const KeyframeAnimation &animation, + const EncoderOptions &options, + EncoderBuffer *out_buffer); +}; + +} // namespace draco + +#endif // DRACO_ANIMATION_KEYFRAME_ANIMATION_ENCODER_H_ diff --git a/contrib/draco/src/draco/animation/keyframe_animation_encoding_test.cc b/contrib/draco/src/draco/animation/keyframe_animation_encoding_test.cc new file mode 100644 index 000000000..4a6491f9d --- /dev/null +++ b/contrib/draco/src/draco/animation/keyframe_animation_encoding_test.cc @@ -0,0 +1,168 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
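A compact round trip through the encoder and decoder above; this is an editorial sketch, BuildToyAnimation() is the hypothetical helper from the earlier example, and error handling is reduced to a bool.

#include "draco/animation/keyframe_animation_decoder.h"
#include "draco/animation/keyframe_animation_encoder.h"

bool RoundTripToyAnimation() {
  draco::KeyframeAnimation animation;
  if (BuildToyAnimation(&animation) < 0) {  // hypothetical helper sketched earlier
    return false;
  }

  // Encode into a buffer with default options.
  draco::EncoderBuffer encoded;
  draco::KeyframeAnimationEncoder encoder;
  draco::EncoderOptions options = draco::EncoderOptions::CreateDefaultOptions();
  if (!encoder.EncodeKeyframeAnimation(animation, options, &encoded).ok()) {
    return false;
  }

  // Decode from the same bytes into a fresh animation.
  draco::DecoderBuffer in;
  in.Init(encoded.data(), encoded.size());
  draco::KeyframeAnimation decoded;
  draco::KeyframeAnimationDecoder decoder;
  draco::DecoderOptions dec_options;
  return decoder.Decode(dec_options, &in, &decoded).ok();
}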
+// +#include "draco/animation/keyframe_animation.h" +#include "draco/animation/keyframe_animation_decoder.h" +#include "draco/animation/keyframe_animation_encoder.h" +#include "draco/core/draco_test_base.h" +#include "draco/core/draco_test_utils.h" + +namespace draco { + +class KeyframeAnimationEncodingTest : public ::testing::Test { + protected: + KeyframeAnimationEncodingTest() {} + + bool CreateAndAddTimestamps(int32_t num_frames) { + timestamps_.resize(num_frames); + for (int i = 0; i < timestamps_.size(); ++i) + timestamps_[i] = static_cast(i); + return keyframe_animation_.SetTimestamps(timestamps_); + } + + int32_t CreateAndAddAnimationData(int32_t num_frames, + uint32_t num_components) { + // Create and add animation data with. + animation_data_.resize(num_frames * num_components); + for (int i = 0; i < animation_data_.size(); ++i) + animation_data_[i] = static_cast(i); + return keyframe_animation_.AddKeyframes(draco::DT_FLOAT32, num_components, + animation_data_); + } + + template + void CompareAnimationData(const KeyframeAnimation &animation0, + const KeyframeAnimation &animation1, + bool quantized) { + ASSERT_EQ(animation0.num_frames(), animation1.num_frames()); + ASSERT_EQ(animation0.num_animations(), animation1.num_animations()); + + if (quantized) { + // TODO(hemmer) : Add test for stable quantization. + // Quantization will result in slightly different values. + // Skip comparing values. + return; + } + + // Compare time stamp. + const auto timestamp_att0 = animation0.timestamps(); + const auto timestamp_att1 = animation0.timestamps(); + for (int i = 0; i < animation0.num_frames(); ++i) { + std::array att_value0; + std::array att_value1; + ASSERT_TRUE((timestamp_att0->GetValue( + draco::AttributeValueIndex(i), &att_value0))); + ASSERT_TRUE((timestamp_att1->GetValue( + draco::AttributeValueIndex(i), &att_value1))); + ASSERT_FLOAT_EQ(att_value0[0], att_value1[0]); + } + + for (int animation_id = 1; animation_id < animation0.num_animations(); + ++animation_id) { + // Compare keyframe data. + const auto keyframe_att0 = animation0.keyframes(animation_id); + const auto keyframe_att1 = animation1.keyframes(animation_id); + ASSERT_EQ(keyframe_att0->num_components(), + keyframe_att1->num_components()); + for (int i = 0; i < animation0.num_frames(); ++i) { + std::array att_value0; + std::array att_value1; + ASSERT_TRUE((keyframe_att0->GetValue( + draco::AttributeValueIndex(i), &att_value0))); + ASSERT_TRUE((keyframe_att1->GetValue( + draco::AttributeValueIndex(i), &att_value1))); + for (int j = 0; j < att_value0.size(); ++j) { + ASSERT_FLOAT_EQ(att_value0[j], att_value1[j]); + } + } + } + } + + template + void TestKeyframeAnimationEncoding() { + TestKeyframeAnimationEncoding(false); + } + + template + void TestKeyframeAnimationEncoding(bool quantized) { + // Encode animation class. + draco::EncoderBuffer buffer; + draco::KeyframeAnimationEncoder encoder; + EncoderOptions options = EncoderOptions::CreateDefaultOptions(); + if (quantized) { + // Set quantization for timestamps. + options.SetAttributeInt(0, "quantization_bits", 20); + // Set quantization for keyframes. + for (int i = 1; i <= keyframe_animation_.num_animations(); ++i) { + options.SetAttributeInt(i, "quantization_bits", 20); + } + } + + ASSERT_TRUE( + encoder.EncodeKeyframeAnimation(keyframe_animation_, options, &buffer) + .ok()); + + draco::DecoderBuffer dec_decoder; + draco::KeyframeAnimationDecoder decoder; + DecoderBuffer dec_buffer; + dec_buffer.Init(buffer.data(), buffer.size()); + + // Decode animation class. 
+ std::unique_ptr decoded_animation( + new KeyframeAnimation()); + DecoderOptions dec_options; + ASSERT_TRUE( + decoder.Decode(dec_options, &dec_buffer, decoded_animation.get()).ok()); + + // Verify if animation before and after compression is identical. + CompareAnimationData(keyframe_animation_, + *decoded_animation, quantized); + } + + draco::KeyframeAnimation keyframe_animation_; + std::vector timestamps_; + std::vector animation_data_; +}; + +TEST_F(KeyframeAnimationEncodingTest, OneComponent) { + const int num_frames = 1; + ASSERT_TRUE(CreateAndAddTimestamps(num_frames)); + ASSERT_EQ(CreateAndAddAnimationData(num_frames, 1), 1); + TestKeyframeAnimationEncoding<1>(); +} + +TEST_F(KeyframeAnimationEncodingTest, ManyComponents) { + const int num_frames = 100; + ASSERT_TRUE(CreateAndAddTimestamps(num_frames)); + ASSERT_EQ(CreateAndAddAnimationData(num_frames, 100), 1); + TestKeyframeAnimationEncoding<100>(); +} + +TEST_F(KeyframeAnimationEncodingTest, ManyComponentsWithQuantization) { + const int num_frames = 100; + ASSERT_TRUE(CreateAndAddTimestamps(num_frames)); + ASSERT_EQ(CreateAndAddAnimationData(num_frames, 4), 1); + // Test compression with quantization. + TestKeyframeAnimationEncoding<4>(true); +} + +TEST_F(KeyframeAnimationEncodingTest, MultipleAnimations) { + const int num_frames = 5; + ASSERT_TRUE(CreateAndAddTimestamps(num_frames)); + ASSERT_EQ(CreateAndAddAnimationData(num_frames, 3), 1); + ASSERT_EQ(CreateAndAddAnimationData(num_frames, 3), 2); + TestKeyframeAnimationEncoding<3>(); +} + +} // namespace draco diff --git a/contrib/draco/src/draco/animation/keyframe_animation_test.cc b/contrib/draco/src/draco/animation/keyframe_animation_test.cc new file mode 100644 index 000000000..bc92b25ff --- /dev/null +++ b/contrib/draco/src/draco/animation/keyframe_animation_test.cc @@ -0,0 +1,102 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/animation/keyframe_animation.h" + +#include "draco/core/draco_test_base.h" + +namespace { + +class KeyframeAnimationTest : public ::testing::Test { + protected: + KeyframeAnimationTest() {} + + bool CreateAndAddTimestamps(int32_t num_frames) { + timestamps_.resize(num_frames); + for (int i = 0; i < timestamps_.size(); ++i) + timestamps_[i] = static_cast(i); + return keyframe_animation_.SetTimestamps(timestamps_); + } + + int32_t CreateAndAddAnimationData(int32_t num_frames, + uint32_t num_components) { + // Create and add animation data with. + animation_data_.resize(num_frames * num_components); + for (int i = 0; i < animation_data_.size(); ++i) + animation_data_[i] = static_cast(i); + return keyframe_animation_.AddKeyframes(draco::DT_FLOAT32, num_components, + animation_data_); + } + + template + void CompareAnimationData() { + // Compare time stamp. 
+ const auto timestamp_att = keyframe_animation_.timestamps(); + for (int i = 0; i < timestamps_.size(); ++i) { + std::array att_value; + ASSERT_TRUE((timestamp_att->GetValue( + draco::AttributeValueIndex(i), &att_value))); + ASSERT_FLOAT_EQ(att_value[0], i); + } + + // Compare keyframe data. + const auto keyframe_att = keyframe_animation_.keyframes(1); + for (int i = 0; i < animation_data_.size() / num_components_t; ++i) { + std::array att_value; + ASSERT_TRUE((keyframe_att->GetValue( + draco::AttributeValueIndex(i), &att_value))); + for (int j = 0; j < num_components_t; ++j) { + ASSERT_FLOAT_EQ(att_value[j], i * num_components_t + j); + } + } + } + + template + void TestKeyframeAnimation(int32_t num_frames) { + ASSERT_TRUE(CreateAndAddTimestamps(num_frames)); + ASSERT_EQ(CreateAndAddAnimationData(num_frames, num_components_t), 1); + CompareAnimationData(); + } + + draco::KeyframeAnimation keyframe_animation_; + std::vector timestamps_; + std::vector animation_data_; +}; + +// Test animation with 1 component and 10 frames. +TEST_F(KeyframeAnimationTest, OneComponent) { TestKeyframeAnimation<1>(10); } + +// Test animation with 4 component and 10 frames. +TEST_F(KeyframeAnimationTest, FourComponent) { TestKeyframeAnimation<4>(10); } + +// Test adding animation data before timestamp. +TEST_F(KeyframeAnimationTest, AddingAnimationFirst) { + ASSERT_EQ(CreateAndAddAnimationData(5, 1), 1); + ASSERT_TRUE(CreateAndAddTimestamps(5)); +} + +// Test adding timestamp more than once. +TEST_F(KeyframeAnimationTest, ErrorAddingTimestampsTwice) { + ASSERT_TRUE(CreateAndAddTimestamps(5)); + ASSERT_FALSE(CreateAndAddTimestamps(5)); +} +// Test animation with multiple animation data. +TEST_F(KeyframeAnimationTest, MultipleAnimationData) { + const int num_frames = 5; + ASSERT_TRUE(CreateAndAddTimestamps(num_frames)); + ASSERT_EQ(CreateAndAddAnimationData(num_frames, 1), 1); + ASSERT_EQ(CreateAndAddAnimationData(num_frames, 2), 2); +} + +} // namespace diff --git a/contrib/draco/src/draco/attributes/attribute_octahedron_transform.cc b/contrib/draco/src/draco/attributes/attribute_octahedron_transform.cc new file mode 100644 index 000000000..51c3bb6c8 --- /dev/null +++ b/contrib/draco/src/draco/attributes/attribute_octahedron_transform.cc @@ -0,0 +1,145 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include "draco/attributes/attribute_octahedron_transform.h" + +#include "draco/attributes/attribute_transform_type.h" +#include "draco/compression/attributes/normal_compression_utils.h" + +namespace draco { + +bool AttributeOctahedronTransform::InitFromAttribute( + const PointAttribute &attribute) { + const AttributeTransformData *const transform_data = + attribute.GetAttributeTransformData(); + if (!transform_data || + transform_data->transform_type() != ATTRIBUTE_OCTAHEDRON_TRANSFORM) { + return false; // Wrong transform type. 
+ } + quantization_bits_ = transform_data->GetParameterValue(0); + return true; +} + +void AttributeOctahedronTransform::CopyToAttributeTransformData( + AttributeTransformData *out_data) const { + out_data->set_transform_type(ATTRIBUTE_OCTAHEDRON_TRANSFORM); + out_data->AppendParameterValue(quantization_bits_); +} + +bool AttributeOctahedronTransform::TransformAttribute( + const PointAttribute &attribute, const std::vector &point_ids, + PointAttribute *target_attribute) { + return GeneratePortableAttribute(attribute, point_ids, + target_attribute->size(), target_attribute); +} + +bool AttributeOctahedronTransform::InverseTransformAttribute( + const PointAttribute &attribute, PointAttribute *target_attribute) { + if (target_attribute->data_type() != DT_FLOAT32) { + return false; + } + + const int num_points = target_attribute->size(); + const int num_components = target_attribute->num_components(); + if (num_components != 3) { + return false; + } + constexpr int kEntrySize = sizeof(float) * 3; + float att_val[3]; + const int32_t *source_attribute_data = reinterpret_cast( + attribute.GetAddress(AttributeValueIndex(0))); + uint8_t *target_address = + target_attribute->GetAddress(AttributeValueIndex(0)); + OctahedronToolBox octahedron_tool_box; + if (!octahedron_tool_box.SetQuantizationBits(quantization_bits_)) { + return false; + } + for (uint32_t i = 0; i < num_points; ++i) { + const int32_t s = *source_attribute_data++; + const int32_t t = *source_attribute_data++; + octahedron_tool_box.QuantizedOctahedralCoordsToUnitVector(s, t, att_val); + + // Store the decoded floating point values into the attribute buffer. + std::memcpy(target_address, att_val, kEntrySize); + target_address += kEntrySize; + } + return true; +} + +void AttributeOctahedronTransform::SetParameters(int quantization_bits) { + quantization_bits_ = quantization_bits; +} + +bool AttributeOctahedronTransform::EncodeParameters( + EncoderBuffer *encoder_buffer) const { + if (is_initialized()) { + encoder_buffer->Encode(static_cast(quantization_bits_)); + return true; + } + return false; +} + +bool AttributeOctahedronTransform::DecodeParameters( + const PointAttribute &attribute, DecoderBuffer *decoder_buffer) { + uint8_t quantization_bits; + if (!decoder_buffer->Decode(&quantization_bits)) { + return false; + } + quantization_bits_ = quantization_bits; + return true; +} + +bool AttributeOctahedronTransform::GeneratePortableAttribute( + const PointAttribute &attribute, const std::vector &point_ids, + int num_points, PointAttribute *target_attribute) const { + DRACO_DCHECK(is_initialized()); + + // Quantize all values in the order given by point_ids into portable + // attribute. + int32_t *const portable_attribute_data = reinterpret_cast( + target_attribute->GetAddress(AttributeValueIndex(0))); + float att_val[3]; + int32_t dst_index = 0; + OctahedronToolBox converter; + if (!converter.SetQuantizationBits(quantization_bits_)) { + return false; + } + if (!point_ids.empty()) { + for (uint32_t i = 0; i < point_ids.size(); ++i) { + const AttributeValueIndex att_val_id = + attribute.mapped_index(point_ids[i]); + attribute.GetValue(att_val_id, att_val); + // Encode the vector into a s and t octahedral coordinates. 
+ int32_t s, t; + converter.FloatVectorToQuantizedOctahedralCoords(att_val, &s, &t); + portable_attribute_data[dst_index++] = s; + portable_attribute_data[dst_index++] = t; + } + } else { + for (PointIndex i(0); i < num_points; ++i) { + const AttributeValueIndex att_val_id = attribute.mapped_index(i); + attribute.GetValue(att_val_id, att_val); + // Encode the vector into a s and t octahedral coordinates. + int32_t s, t; + converter.FloatVectorToQuantizedOctahedralCoords(att_val, &s, &t); + portable_attribute_data[dst_index++] = s; + portable_attribute_data[dst_index++] = t; + } + } + + return true; +} + +} // namespace draco diff --git a/contrib/draco/src/draco/attributes/attribute_octahedron_transform.h b/contrib/draco/src/draco/attributes/attribute_octahedron_transform.h new file mode 100644 index 000000000..21a1725bb --- /dev/null +++ b/contrib/draco/src/draco/attributes/attribute_octahedron_transform.h @@ -0,0 +1,81 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef DRACO_ATTRIBUTES_ATTRIBUTE_OCTAHEDRON_TRANSFORM_H_ +#define DRACO_ATTRIBUTES_ATTRIBUTE_OCTAHEDRON_TRANSFORM_H_ + +#include "draco/attributes/attribute_transform.h" +#include "draco/attributes/point_attribute.h" +#include "draco/core/encoder_buffer.h" + +namespace draco { + +// Attribute transform for attributes transformed to octahedral coordinates. +class AttributeOctahedronTransform : public AttributeTransform { + public: + AttributeOctahedronTransform() : quantization_bits_(-1) {} + + // Return attribute transform type. + AttributeTransformType Type() const override { + return ATTRIBUTE_OCTAHEDRON_TRANSFORM; + } + // Try to init transform from attribute. + bool InitFromAttribute(const PointAttribute &attribute) override; + // Copy parameter values into the provided AttributeTransformData instance. + void CopyToAttributeTransformData( + AttributeTransformData *out_data) const override; + + bool TransformAttribute(const PointAttribute &attribute, + const std::vector &point_ids, + PointAttribute *target_attribute) override; + + bool InverseTransformAttribute(const PointAttribute &attribute, + PointAttribute *target_attribute) override; + + // Set number of quantization bits. + void SetParameters(int quantization_bits); + + // Encode relevant parameters into buffer. + bool EncodeParameters(EncoderBuffer *encoder_buffer) const override; + + bool DecodeParameters(const PointAttribute &attribute, + DecoderBuffer *decoder_buffer) override; + + bool is_initialized() const { return quantization_bits_ != -1; } + int32_t quantization_bits() const { return quantization_bits_; } + + protected: + DataType GetTransformedDataType( + const PointAttribute &attribute) const override { + return DT_UINT32; + } + int GetTransformedNumComponents( + const PointAttribute &attribute) const override { + return 2; + } + + // Perform the actual transformation. 
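To make the methods above concrete, a hedged sketch, not part of Draco, of quantizing a normal attribute into octahedral coordinates; |normals| stands for an existing 3-component DT_FLOAT32 PointAttribute supplied by the caller, and 10 bits is an arbitrary illustrative choice.

#include <memory>
#include <vector>

#include "draco/attributes/attribute_octahedron_transform.h"

// Quantizes unit normals into two integer octahedral coordinates per value.
bool OctahedralQuantizeNormals(const draco::PointAttribute &normals,
                               draco::EncoderBuffer *out_buffer) {
  draco::AttributeOctahedronTransform transform;
  transform.SetParameters(10);  // illustrative bit count

  // Identity-mapped target attribute with the transformed layout (2 components).
  std::unique_ptr<draco::PointAttribute> portable =
      transform.InitTransformedAttribute(normals,
                                         static_cast<int>(normals.size()));

  // An empty |point_ids| vector means values are processed in attribute order.
  if (!transform.TransformAttribute(normals, std::vector<draco::PointIndex>(),
                                    portable.get())) {
    return false;
  }
  // Record the bit count so a decoder can later invert the transform.
  return transform.EncodeParameters(out_buffer);
}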
+ bool GeneratePortableAttribute(const PointAttribute &attribute, + const std::vector &point_ids, + int num_points, + PointAttribute *target_attribute) const; + + private: + int32_t quantization_bits_; +}; + +} // namespace draco + +#endif // DRACO_ATTRIBUTES_ATTRIBUTE_OCTAHEDRON_TRANSFORM_H_ diff --git a/contrib/draco/src/draco/attributes/attribute_quantization_transform.cc b/contrib/draco/src/draco/attributes/attribute_quantization_transform.cc new file mode 100644 index 000000000..a7f93a488 --- /dev/null +++ b/contrib/draco/src/draco/attributes/attribute_quantization_transform.cc @@ -0,0 +1,260 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/attributes/attribute_quantization_transform.h" + +#include "draco/attributes/attribute_transform_type.h" +#include "draco/core/quantization_utils.h" + +namespace draco { + +bool AttributeQuantizationTransform::InitFromAttribute( + const PointAttribute &attribute) { + const AttributeTransformData *const transform_data = + attribute.GetAttributeTransformData(); + if (!transform_data || + transform_data->transform_type() != ATTRIBUTE_QUANTIZATION_TRANSFORM) { + return false; // Wrong transform type. + } + int32_t byte_offset = 0; + quantization_bits_ = transform_data->GetParameterValue(byte_offset); + byte_offset += 4; + min_values_.resize(attribute.num_components()); + for (int i = 0; i < attribute.num_components(); ++i) { + min_values_[i] = transform_data->GetParameterValue(byte_offset); + byte_offset += 4; + } + range_ = transform_data->GetParameterValue(byte_offset); + return true; +} + +// Copy parameter values into the provided AttributeTransformData instance. +void AttributeQuantizationTransform::CopyToAttributeTransformData( + AttributeTransformData *out_data) const { + out_data->set_transform_type(ATTRIBUTE_QUANTIZATION_TRANSFORM); + out_data->AppendParameterValue(quantization_bits_); + for (int i = 0; i < min_values_.size(); ++i) { + out_data->AppendParameterValue(min_values_[i]); + } + out_data->AppendParameterValue(range_); +} + +bool AttributeQuantizationTransform::TransformAttribute( + const PointAttribute &attribute, const std::vector &point_ids, + PointAttribute *target_attribute) { + if (point_ids.empty()) { + GeneratePortableAttribute(attribute, target_attribute->size(), + target_attribute); + } else { + GeneratePortableAttribute(attribute, point_ids, target_attribute->size(), + target_attribute); + } + return true; +} + +bool AttributeQuantizationTransform::InverseTransformAttribute( + const PointAttribute &attribute, PointAttribute *target_attribute) { + if (target_attribute->data_type() != DT_FLOAT32) { + return false; + } + + // Convert all quantized values back to floats. 
+ const int32_t max_quantized_value = + (1u << static_cast(quantization_bits_)) - 1; + const int num_components = target_attribute->num_components(); + const int entry_size = sizeof(float) * num_components; + const std::unique_ptr att_val(new float[num_components]); + int quant_val_id = 0; + int out_byte_pos = 0; + Dequantizer dequantizer; + if (!dequantizer.Init(range_, max_quantized_value)) { + return false; + } + const int32_t *const source_attribute_data = + reinterpret_cast( + attribute.GetAddress(AttributeValueIndex(0))); + + const int num_values = target_attribute->size(); + + for (uint32_t i = 0; i < num_values; ++i) { + for (int c = 0; c < num_components; ++c) { + float value = + dequantizer.DequantizeFloat(source_attribute_data[quant_val_id++]); + value = value + min_values_[c]; + att_val[c] = value; + } + // Store the floating point value into the attribute buffer. + target_attribute->buffer()->Write(out_byte_pos, att_val.get(), entry_size); + out_byte_pos += entry_size; + } + return true; +} + +bool AttributeQuantizationTransform::IsQuantizationValid( + int quantization_bits) { + // Currently we allow only up to 30 bit quantization. + return quantization_bits >= 1 && quantization_bits <= 30; +} + +bool AttributeQuantizationTransform::SetParameters(int quantization_bits, + const float *min_values, + int num_components, + float range) { + if (!IsQuantizationValid(quantization_bits)) { + return false; + } + quantization_bits_ = quantization_bits; + min_values_.assign(min_values, min_values + num_components); + range_ = range; + return true; +} + +bool AttributeQuantizationTransform::ComputeParameters( + const PointAttribute &attribute, const int quantization_bits) { + if (quantization_bits_ != -1) { + return false; // already initialized. + } + if (!IsQuantizationValid(quantization_bits)) { + return false; + } + quantization_bits_ = quantization_bits; + + const int num_components = attribute.num_components(); + range_ = 0.f; + min_values_ = std::vector(num_components, 0.f); + const std::unique_ptr max_values(new float[num_components]); + const std::unique_ptr att_val(new float[num_components]); + // Compute minimum values and max value difference. + attribute.GetValue(AttributeValueIndex(0), att_val.get()); + attribute.GetValue(AttributeValueIndex(0), min_values_.data()); + attribute.GetValue(AttributeValueIndex(0), max_values.get()); + + for (AttributeValueIndex i(1); i < static_cast(attribute.size()); + ++i) { + attribute.GetValue(i, att_val.get()); + for (int c = 0; c < num_components; ++c) { + if (min_values_[c] > att_val[c]) { + min_values_[c] = att_val[c]; + } + if (max_values[c] < att_val[c]) { + max_values[c] = att_val[c]; + } + } + } + for (int c = 0; c < num_components; ++c) { + if (std::isnan(min_values_[c]) || std::isinf(min_values_[c]) || + std::isnan(max_values[c]) || std::isinf(max_values[c])) { + return false; + } + const float dif = max_values[c] - min_values_[c]; + if (dif > range_) { + range_ = dif; + } + } + + // In case all values are the same, initialize the range to unit length. This + // will ensure that all values are quantized properly to the same value. 
+ if (range_ == 0.f) { + range_ = 1.f; + } + + return true; +} + +bool AttributeQuantizationTransform::EncodeParameters( + EncoderBuffer *encoder_buffer) const { + if (is_initialized()) { + encoder_buffer->Encode(min_values_.data(), + sizeof(float) * min_values_.size()); + encoder_buffer->Encode(range_); + encoder_buffer->Encode(static_cast(quantization_bits_)); + return true; + } + return false; +} + +bool AttributeQuantizationTransform::DecodeParameters( + const PointAttribute &attribute, DecoderBuffer *decoder_buffer) { + min_values_.resize(attribute.num_components()); + if (!decoder_buffer->Decode(&min_values_[0], + sizeof(float) * min_values_.size())) { + return false; + } + if (!decoder_buffer->Decode(&range_)) { + return false; + } + uint8_t quantization_bits; + if (!decoder_buffer->Decode(&quantization_bits)) { + return false; + } + if (!IsQuantizationValid(quantization_bits)) { + return false; + } + quantization_bits_ = quantization_bits; + return true; +} + +void AttributeQuantizationTransform::GeneratePortableAttribute( + const PointAttribute &attribute, int num_points, + PointAttribute *target_attribute) const { + DRACO_DCHECK(is_initialized()); + + const int num_components = attribute.num_components(); + + // Quantize all values using the order given by point_ids. + int32_t *const portable_attribute_data = reinterpret_cast( + target_attribute->GetAddress(AttributeValueIndex(0))); + const uint32_t max_quantized_value = (1 << (quantization_bits_)) - 1; + Quantizer quantizer; + quantizer.Init(range(), max_quantized_value); + int32_t dst_index = 0; + const std::unique_ptr att_val(new float[num_components]); + for (PointIndex i(0); i < num_points; ++i) { + const AttributeValueIndex att_val_id = attribute.mapped_index(i); + attribute.GetValue(att_val_id, att_val.get()); + for (int c = 0; c < num_components; ++c) { + const float value = (att_val[c] - min_values()[c]); + const int32_t q_val = quantizer.QuantizeFloat(value); + portable_attribute_data[dst_index++] = q_val; + } + } +} + +void AttributeQuantizationTransform::GeneratePortableAttribute( + const PointAttribute &attribute, const std::vector &point_ids, + int num_points, PointAttribute *target_attribute) const { + DRACO_DCHECK(is_initialized()); + + const int num_components = attribute.num_components(); + + // Quantize all values using the order given by point_ids. + int32_t *const portable_attribute_data = reinterpret_cast( + target_attribute->GetAddress(AttributeValueIndex(0))); + const uint32_t max_quantized_value = (1 << (quantization_bits_)) - 1; + Quantizer quantizer; + quantizer.Init(range(), max_quantized_value); + int32_t dst_index = 0; + const std::unique_ptr att_val(new float[num_components]); + for (uint32_t i = 0; i < point_ids.size(); ++i) { + const AttributeValueIndex att_val_id = attribute.mapped_index(point_ids[i]); + attribute.GetValue(att_val_id, att_val.get()); + for (int c = 0; c < num_components; ++c) { + const float value = (att_val[c] - min_values()[c]); + const int32_t q_val = quantizer.QuantizeFloat(value); + portable_attribute_data[dst_index++] = q_val; + } + } +} + +} // namespace draco diff --git a/contrib/draco/src/draco/attributes/attribute_quantization_transform.h b/contrib/draco/src/draco/attributes/attribute_quantization_transform.h new file mode 100644 index 000000000..f1122b680 --- /dev/null +++ b/contrib/draco/src/draco/attributes/attribute_quantization_transform.h @@ -0,0 +1,102 @@ +// Copyright 2017 The Draco Authors. 
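Analogous to the octahedral case, an editorial sketch of applying the quantization transform above to a position attribute; |positions| is an assumed caller-provided DT_FLOAT32 attribute and 14 bits is an arbitrary illustrative choice.

#include <memory>
#include <vector>

#include "draco/attributes/attribute_quantization_transform.h"

// Quantizes float positions to 14-bit integers and records the parameters.
bool QuantizePositions(const draco::PointAttribute &positions,
                       draco::EncoderBuffer *out_buffer) {
  draco::AttributeQuantizationTransform transform;
  // Derives per-component minima and the range from the data; rejects NaN/Inf
  // values and bit counts outside 1..30.
  if (!transform.ComputeParameters(positions, 14)) {
    return false;
  }
  std::unique_ptr<draco::PointAttribute> portable =
      transform.InitTransformedAttribute(positions,
                                         static_cast<int>(positions.size()));
  if (!transform.TransformAttribute(positions,
                                    std::vector<draco::PointIndex>(),
                                    portable.get())) {
    return false;
  }
  // Writes the minima, the range, and the bit count for the decoder.
  return transform.EncodeParameters(out_buffer);
}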
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_ATTRIBUTES_ATTRIBUTE_QUANTIZATION_TRANSFORM_H_ +#define DRACO_ATTRIBUTES_ATTRIBUTE_QUANTIZATION_TRANSFORM_H_ + +#include + +#include "draco/attributes/attribute_transform.h" +#include "draco/attributes/point_attribute.h" +#include "draco/core/encoder_buffer.h" + +namespace draco { + +// Attribute transform for quantized attributes. +class AttributeQuantizationTransform : public AttributeTransform { + public: + AttributeQuantizationTransform() : quantization_bits_(-1), range_(0.f) {} + // Return attribute transform type. + AttributeTransformType Type() const override { + return ATTRIBUTE_QUANTIZATION_TRANSFORM; + } + // Try to init transform from attribute. + bool InitFromAttribute(const PointAttribute &attribute) override; + // Copy parameter values into the provided AttributeTransformData instance. + void CopyToAttributeTransformData( + AttributeTransformData *out_data) const override; + + bool TransformAttribute(const PointAttribute &attribute, + const std::vector &point_ids, + PointAttribute *target_attribute) override; + + bool InverseTransformAttribute(const PointAttribute &attribute, + PointAttribute *target_attribute) override; + + bool SetParameters(int quantization_bits, const float *min_values, + int num_components, float range); + + bool ComputeParameters(const PointAttribute &attribute, + const int quantization_bits); + + // Encode relevant parameters into buffer. + bool EncodeParameters(EncoderBuffer *encoder_buffer) const override; + + bool DecodeParameters(const PointAttribute &attribute, + DecoderBuffer *decoder_buffer) override; + + int32_t quantization_bits() const { return quantization_bits_; } + float min_value(int axis) const { return min_values_[axis]; } + const std::vector &min_values() const { return min_values_; } + float range() const { return range_; } + bool is_initialized() const { return quantization_bits_ != -1; } + + protected: + // Create portable attribute using 1:1 mapping between points in the input and + // output attribute. + void GeneratePortableAttribute(const PointAttribute &attribute, + int num_points, + PointAttribute *target_attribute) const; + + // Create portable attribute using custom mapping between input and output + // points. + void GeneratePortableAttribute(const PointAttribute &attribute, + const std::vector &point_ids, + int num_points, + PointAttribute *target_attribute) const; + + DataType GetTransformedDataType( + const PointAttribute &attribute) const override { + return DT_UINT32; + } + int GetTransformedNumComponents( + const PointAttribute &attribute) const override { + return attribute.num_components(); + } + + static bool IsQuantizationValid(int quantization_bits); + + private: + int32_t quantization_bits_; + + // Minimal dequantized value for each component of the attribute. + std::vector min_values_; + + // Bounds of the dequantized attribute (max delta over all components). 
+ float range_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTE_DEQUANTIZATION_TRANSFORM_H_ diff --git a/contrib/draco/src/draco/attributes/attribute_transform.cc b/contrib/draco/src/draco/attributes/attribute_transform.cc new file mode 100644 index 000000000..174e6b822 --- /dev/null +++ b/contrib/draco/src/draco/attributes/attribute_transform.cc @@ -0,0 +1,40 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/attributes/attribute_transform.h" + +namespace draco { + +bool AttributeTransform::TransferToAttribute(PointAttribute *attribute) const { + std::unique_ptr transform_data( + new AttributeTransformData()); + this->CopyToAttributeTransformData(transform_data.get()); + attribute->SetAttributeTransformData(std::move(transform_data)); + return true; +} + +std::unique_ptr AttributeTransform::InitTransformedAttribute( + const PointAttribute &src_attribute, int num_entries) { + const int num_components = GetTransformedNumComponents(src_attribute); + const DataType dt = GetTransformedDataType(src_attribute); + GeometryAttribute va; + va.Init(src_attribute.attribute_type(), nullptr, num_components, dt, false, + num_components * DataTypeLength(dt), 0); + std::unique_ptr transformed_attribute(new PointAttribute(va)); + transformed_attribute->Reset(num_entries); + transformed_attribute->SetIdentityMapping(); + return transformed_attribute; +} + +} // namespace draco diff --git a/contrib/draco/src/draco/attributes/attribute_transform.h b/contrib/draco/src/draco/attributes/attribute_transform.h new file mode 100644 index 000000000..62aad60db --- /dev/null +++ b/contrib/draco/src/draco/attributes/attribute_transform.h @@ -0,0 +1,76 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_H_ +#define DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_H_ + +#include "draco/attributes/attribute_transform_data.h" +#include "draco/attributes/point_attribute.h" +#include "draco/core/decoder_buffer.h" +#include "draco/core/encoder_buffer.h" + +namespace draco { + +// Virtual base class for various attribute transforms, enforcing common +// interface where possible. +class AttributeTransform { + public: + virtual ~AttributeTransform() = default; + + // Return attribute transform type. + virtual AttributeTransformType Type() const = 0; + // Try to init transform from attribute. 
+ virtual bool InitFromAttribute(const PointAttribute &attribute) = 0; + // Copy parameter values into the provided AttributeTransformData instance. + virtual void CopyToAttributeTransformData( + AttributeTransformData *out_data) const = 0; + bool TransferToAttribute(PointAttribute *attribute) const; + + // Applies the transform to |attribute| and stores the result in + // |target_attribute|. |point_ids| is an optional vector that can be used to + // remap values during the transform. + virtual bool TransformAttribute(const PointAttribute &attribute, + const std::vector &point_ids, + PointAttribute *target_attribute) = 0; + + // Applies an inverse transform to |attribute| and stores the result in + // |target_attribute|. In this case, |attribute| is an attribute that was + // already transformed (e.g. quantized) and |target_attribute| is the + // attribute before the transformation. + virtual bool InverseTransformAttribute(const PointAttribute &attribute, + PointAttribute *target_attribute) = 0; + + // Encodes all data needed by the transformation into the |encoder_buffer|. + virtual bool EncodeParameters(EncoderBuffer *encoder_buffer) const = 0; + + // Decodes all data needed to transform |attribute| back to the original + // format. + virtual bool DecodeParameters(const PointAttribute &attribute, + DecoderBuffer *decoder_buffer) = 0; + + // Initializes a transformed attribute that can be used as target in the + // TransformAttribute() function call. + virtual std::unique_ptr InitTransformedAttribute( + const PointAttribute &src_attribute, int num_entries); + + protected: + virtual DataType GetTransformedDataType( + const PointAttribute &attribute) const = 0; + virtual int GetTransformedNumComponents( + const PointAttribute &attribute) const = 0; +}; + +} // namespace draco + +#endif // DRACO_ATTRIBUTES_ATTRIBUTE_OCTAHEDRON_TRANSFORM_H_ diff --git a/contrib/draco/src/draco/attributes/attribute_transform_data.h b/contrib/draco/src/draco/attributes/attribute_transform_data.h new file mode 100644 index 000000000..96ed07320 --- /dev/null +++ b/contrib/draco/src/draco/attributes/attribute_transform_data.h @@ -0,0 +1,71 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_DATA_H_ +#define DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_DATA_H_ + +#include + +#include "draco/attributes/attribute_transform_type.h" +#include "draco/core/data_buffer.h" + +namespace draco { + +// Class for holding parameter values for an attribute transform of a +// PointAttribute. This can be for example quantization data for an attribute +// that holds quantized values. This class provides only a basic storage for +// attribute transform parameters and it should be accessed only through wrapper +// classes for a specific transform (e.g. AttributeQuantizationTransform). 
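+// Illustrative use (sketch added for clarity, not part of the upstream file):
+// a wrapper such as AttributeQuantizationTransform would typically store and
+// read its parameters through this buffer, e.g.
+//   AttributeTransformData data;
+//   data.set_transform_type(ATTRIBUTE_QUANTIZATION_TRANSFORM);
+//   data.AppendParameterValue<int32_t>(quantization_bits);
+//   const int32_t bits = data.GetParameterValue<int32_t>(0);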
+class AttributeTransformData {
+ public:
+  AttributeTransformData() : transform_type_(ATTRIBUTE_INVALID_TRANSFORM) {}
+  AttributeTransformData(const AttributeTransformData &data) = default;
+
+  // Returns the type of the attribute transform that is described by the class.
+  AttributeTransformType transform_type() const { return transform_type_; }
+  void set_transform_type(AttributeTransformType type) {
+    transform_type_ = type;
+  }
+
+  // Returns a parameter value on a given |byte_offset|.
+  template <typename DataTypeT>
+  DataTypeT GetParameterValue(int byte_offset) const {
+    DataTypeT out_data;
+    buffer_.Read(byte_offset, &out_data, sizeof(DataTypeT));
+    return out_data;
+  }
+
+  // Sets a parameter value on a given |byte_offset|.
+  template <typename DataTypeT>
+  void SetParameterValue(int byte_offset, const DataTypeT &in_data) {
+    if (byte_offset + sizeof(DataTypeT) > buffer_.data_size()) {
+      buffer_.Resize(byte_offset + sizeof(DataTypeT));
+    }
+    buffer_.Write(byte_offset, &in_data, sizeof(DataTypeT));
+  }
+
+  // Sets a parameter value at the end of the |buffer_|.
+  template <typename DataTypeT>
+  void AppendParameterValue(const DataTypeT &in_data) {
+    SetParameterValue(static_cast<int>(buffer_.data_size()), in_data);
+  }
+
+ private:
+  AttributeTransformType transform_type_;
+  DataBuffer buffer_;
+};
+
+} // namespace draco
+
+#endif // DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_DATA_H_
diff --git a/contrib/draco/src/draco/attributes/attribute_transform_type.h b/contrib/draco/src/draco/attributes/attribute_transform_type.h
new file mode 100644
index 000000000..51ce6f333
--- /dev/null
+++ b/contrib/draco/src/draco/attributes/attribute_transform_type.h
@@ -0,0 +1,30 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_TYPE_H_
+#define DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_TYPE_H_
+
+namespace draco {
+
+// List of all currently supported attribute transforms.
+enum AttributeTransformType {
+  ATTRIBUTE_INVALID_TRANSFORM = -1,
+  ATTRIBUTE_NO_TRANSFORM = 0,
+  ATTRIBUTE_QUANTIZATION_TRANSFORM = 1,
+  ATTRIBUTE_OCTAHEDRON_TRANSFORM = 2,
+};
+
+} // namespace draco
+
+#endif // DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_TYPE_H_
diff --git a/contrib/draco/src/draco/attributes/geometry_attribute.cc b/contrib/draco/src/draco/attributes/geometry_attribute.cc
new file mode 100644
index 000000000..b62478426
--- /dev/null
+++ b/contrib/draco/src/draco/attributes/geometry_attribute.cc
@@ -0,0 +1,102 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/attributes/geometry_attribute.h" + +namespace draco { + +GeometryAttribute::GeometryAttribute() + : buffer_(nullptr), + num_components_(1), + data_type_(DT_FLOAT32), + byte_stride_(0), + byte_offset_(0), + attribute_type_(INVALID), + unique_id_(0) {} + +void GeometryAttribute::Init(GeometryAttribute::Type attribute_type, + DataBuffer *buffer, int8_t num_components, + DataType data_type, bool normalized, + int64_t byte_stride, int64_t byte_offset) { + buffer_ = buffer; + if (buffer) { + buffer_descriptor_.buffer_id = buffer->buffer_id(); + buffer_descriptor_.buffer_update_count = buffer->update_count(); + } + num_components_ = num_components; + data_type_ = data_type; + normalized_ = normalized; + byte_stride_ = byte_stride; + byte_offset_ = byte_offset; + attribute_type_ = attribute_type; +} + +bool GeometryAttribute::CopyFrom(const GeometryAttribute &src_att) { + num_components_ = src_att.num_components_; + data_type_ = src_att.data_type_; + normalized_ = src_att.normalized_; + byte_stride_ = src_att.byte_stride_; + byte_offset_ = src_att.byte_offset_; + attribute_type_ = src_att.attribute_type_; + buffer_descriptor_ = src_att.buffer_descriptor_; + unique_id_ = src_att.unique_id_; + if (src_att.buffer_ == nullptr) { + buffer_ = nullptr; + } else { + if (buffer_ == nullptr) { + return false; + } + buffer_->Update(src_att.buffer_->data(), src_att.buffer_->data_size()); + } + return true; +} + +bool GeometryAttribute::operator==(const GeometryAttribute &va) const { + if (attribute_type_ != va.attribute_type_) { + return false; + } + // It's OK to compare just the buffer descriptors here. We don't need to + // compare the buffers themselves. + if (buffer_descriptor_.buffer_id != va.buffer_descriptor_.buffer_id) { + return false; + } + if (buffer_descriptor_.buffer_update_count != + va.buffer_descriptor_.buffer_update_count) { + return false; + } + if (num_components_ != va.num_components_) { + return false; + } + if (data_type_ != va.data_type_) { + return false; + } + if (byte_stride_ != va.byte_stride_) { + return false; + } + if (byte_offset_ != va.byte_offset_) { + return false; + } + return true; +} + +void GeometryAttribute::ResetBuffer(DataBuffer *buffer, int64_t byte_stride, + int64_t byte_offset) { + buffer_ = buffer; + buffer_descriptor_.buffer_id = buffer->buffer_id(); + buffer_descriptor_.buffer_update_count = buffer->update_count(); + byte_stride_ = byte_stride; + byte_offset_ = byte_offset; +} + +} // namespace draco diff --git a/contrib/draco/src/draco/attributes/geometry_attribute.h b/contrib/draco/src/draco/attributes/geometry_attribute.h new file mode 100644 index 000000000..f4d099b1b --- /dev/null +++ b/contrib/draco/src/draco/attributes/geometry_attribute.h @@ -0,0 +1,350 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//
+#ifndef DRACO_ATTRIBUTES_GEOMETRY_ATTRIBUTE_H_
+#define DRACO_ATTRIBUTES_GEOMETRY_ATTRIBUTE_H_
+
+#include <array>
+#include <limits>
+
+#include "draco/attributes/geometry_indices.h"
+#include "draco/core/data_buffer.h"
+#include "draco/core/hash_utils.h"
+
+namespace draco {
+
+// The class provides access to a specific attribute which is stored in a
+// DataBuffer, such as normals or coordinates. However, the GeometryAttribute
+// class does not own the buffer and the buffer itself may store other data
+// unrelated to this attribute (such as data for other attributes in which case
+// we can have multiple GeometryAttributes accessing one buffer). Typically,
+// all attributes for a point (or corner, face) are stored in one block, which
+// is advantageous in terms of memory access. The length of the entire block is
+// given by the byte_stride, the position where the attribute starts is given by
+// the byte_offset, the actual number of bytes that the attribute occupies is
+// given by the data_type and the number of components.
+class GeometryAttribute {
+ public:
+  // Supported attribute types.
+  enum Type {
+    INVALID = -1,
+    // Named attributes start here. The difference between named and generic
+    // attributes is that for named attributes we know their purpose and we
+    // can apply some special methods when dealing with them (e.g. during
+    // encoding).
+    POSITION = 0,
+    NORMAL,
+    COLOR,
+    TEX_COORD,
+    // A special id used to mark attributes that are not assigned to any known
+    // predefined use case. Such attributes are often used for a shader specific
+    // data.
+    GENERIC,
+    // Total number of different attribute types.
+    // Always keep behind all named attributes.
+    NAMED_ATTRIBUTES_COUNT,
+  };
+
+  GeometryAttribute();
+  // Initializes and enables the attribute.
+  void Init(Type attribute_type, DataBuffer *buffer, int8_t num_components,
+            DataType data_type, bool normalized, int64_t byte_stride,
+            int64_t byte_offset);
+  bool IsValid() const { return buffer_ != nullptr; }
+
+  // Copies data from the source attribute to the this attribute.
+  // This attribute must have a valid buffer allocated otherwise the operation
+  // is going to fail and return false.
+  bool CopyFrom(const GeometryAttribute &src_att);
+
+  // Function for getting a attribute value with a specific format.
+  // Unsafe. Caller must ensure the accessed memory is valid.
+  // T is the attribute data type.
+  // att_components_t is the number of attribute components.
+  template <typename T, int att_components_t>
+  std::array<T, att_components_t> GetValue(
+      AttributeValueIndex att_index) const {
+    // Byte address of the attribute index.
+    const int64_t byte_pos = byte_offset_ + byte_stride_ * att_index.value();
+    std::array<T, att_components_t> out;
+    buffer_->Read(byte_pos, &(out[0]), sizeof(out));
+    return out;
+  }
+
+  // Function for getting a attribute value with a specific format.
+  // T is the attribute data type.
+  // att_components_t is the number of attribute components.
+  template <typename T, int att_components_t>
+  bool GetValue(AttributeValueIndex att_index,
+                std::array<T, att_components_t> *out) const {
+    // Byte address of the attribute index.
+    const int64_t byte_pos = byte_offset_ + byte_stride_ * att_index.value();
+    // Check we are not reading past end of data.
+    if (byte_pos + sizeof(*out) > buffer_->data_size()) {
+      return false;
+    }
+    buffer_->Read(byte_pos, &((*out)[0]), sizeof(*out));
+    return true;
+  }
+
+  // Returns the byte position of the attribute entry in the data buffer.
+  inline int64_t GetBytePos(AttributeValueIndex att_index) const {
+    return byte_offset_ + byte_stride_ * att_index.value();
+  }
+
+  inline const uint8_t *GetAddress(AttributeValueIndex att_index) const {
+    const int64_t byte_pos = GetBytePos(att_index);
+    return buffer_->data() + byte_pos;
+  }
+  inline uint8_t *GetAddress(AttributeValueIndex att_index) {
+    const int64_t byte_pos = GetBytePos(att_index);
+    return buffer_->data() + byte_pos;
+  }
+  inline bool IsAddressValid(const uint8_t *address) const {
+    return ((buffer_->data() + buffer_->data_size()) > address);
+  }
+
+  // Fills out_data with the raw value of the requested attribute entry.
+  // out_data must be at least byte_stride_ long.
+  void GetValue(AttributeValueIndex att_index, void *out_data) const {
+    const int64_t byte_pos = byte_offset_ + byte_stride_ * att_index.value();
+    buffer_->Read(byte_pos, out_data, byte_stride_);
+  }
+
+  // Sets a value of an attribute entry. The input value must be allocated to
+  // cover all components of a single attribute entry.
+  void SetAttributeValue(AttributeValueIndex entry_index, const void *value) {
+    const int64_t byte_pos = entry_index.value() * byte_stride();
+    buffer_->Write(byte_pos, value, byte_stride());
+  }
+
+  // DEPRECATED: Use
+  //   ConvertValue(AttributeValueIndex att_id,
+  //                int out_num_components,
+  //                OutT *out_val);
+  //
+  // Function for conversion of a attribute to a specific output format.
+  // OutT is the desired data type of the attribute.
+  // out_att_components_t is the number of components of the output format.
+  // Returns false when the conversion failed.
+  template <typename OutT, int out_att_components_t>
+  bool ConvertValue(AttributeValueIndex att_id, OutT *out_val) const {
+    return ConvertValue<OutT>(att_id, out_att_components_t, out_val);
+  }
+
+  // Function for conversion of a attribute to a specific output format.
+  // |out_val| needs to be able to store |out_num_components| values.
+  // OutT is the desired data type of the attribute.
+  // Returns false when the conversion failed.
+  template <typename OutT>
+  bool ConvertValue(AttributeValueIndex att_id, int8_t out_num_components,
+                    OutT *out_val) const {
+    if (out_val == nullptr) {
+      return false;
+    }
+    switch (data_type_) {
+      case DT_INT8:
+        return ConvertTypedValue<int8_t, OutT>(att_id, out_num_components,
+                                               out_val);
+      case DT_UINT8:
+        return ConvertTypedValue<uint8_t, OutT>(att_id, out_num_components,
+                                                out_val);
+      case DT_INT16:
+        return ConvertTypedValue<int16_t, OutT>(att_id, out_num_components,
+                                                out_val);
+      case DT_UINT16:
+        return ConvertTypedValue<uint16_t, OutT>(att_id, out_num_components,
+                                                 out_val);
+      case DT_INT32:
+        return ConvertTypedValue<int32_t, OutT>(att_id, out_num_components,
+                                                out_val);
+      case DT_UINT32:
+        return ConvertTypedValue<uint32_t, OutT>(att_id, out_num_components,
+                                                 out_val);
+      case DT_INT64:
+        return ConvertTypedValue<int64_t, OutT>(att_id, out_num_components,
+                                                out_val);
+      case DT_UINT64:
+        return ConvertTypedValue<uint64_t, OutT>(att_id, out_num_components,
+                                                 out_val);
+      case DT_FLOAT32:
+        return ConvertTypedValue<float, OutT>(att_id, out_num_components,
+                                              out_val);
+      case DT_FLOAT64:
+        return ConvertTypedValue<double, OutT>(att_id, out_num_components,
+                                               out_val);
+      case DT_BOOL:
+        return ConvertTypedValue<bool, OutT>(att_id, out_num_components,
+                                             out_val);
+      default:
+        // Wrong attribute type.
+        return false;
+    }
+  }
+
+  // Function for conversion of a attribute to a specific output format.
+  // The |out_value| must be able to store all components of a single attribute
+  // entry.
+  // OutT is the desired data type of the attribute.
+  // Returns false when the conversion failed.
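+  // Illustrative use (sketch added for clarity, not part of the upstream
+  // file):
+  //   float pos[3];
+  //   att.ConvertValue<float>(draco::AttributeValueIndex(0), pos);
+  // converts one attribute entry into |num_components()| floats regardless of
+  // the stored data type.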
+  template <typename OutT>
+  bool ConvertValue(AttributeValueIndex att_index, OutT *out_value) const {
+    return ConvertValue<OutT>(att_index, num_components_, out_value);
+  }
+
+  // Utility function. Returns |attribute_type| as std::string.
+  static std::string TypeToString(Type attribute_type) {
+    switch (attribute_type) {
+      case INVALID:
+        return "INVALID";
+      case POSITION:
+        return "POSITION";
+      case NORMAL:
+        return "NORMAL";
+      case COLOR:
+        return "COLOR";
+      case TEX_COORD:
+        return "TEX_COORD";
+      case GENERIC:
+        return "GENERIC";
+      default:
+        return "UNKNOWN";
+    }
+  }
+
+  bool operator==(const GeometryAttribute &va) const;
+
+  // Returns the type of the attribute indicating the nature of the attribute.
+  Type attribute_type() const { return attribute_type_; }
+  void set_attribute_type(Type type) { attribute_type_ = type; }
+  // Returns the data type that is stored in the attribute.
+  DataType data_type() const { return data_type_; }
+  // Returns the number of components that are stored for each entry.
+  // For position attribute this is usually three (x,y,z),
+  // while texture coordinates have two components (u,v).
+  int8_t num_components() const { return num_components_; }
+  // Indicates whether the data type should be normalized before interpretation,
+  // that is, it should be divided by the max value of the data type.
+  bool normalized() const { return normalized_; }
+  // The buffer storing the entire data of the attribute.
+  const DataBuffer *buffer() const { return buffer_; }
+  // Returns the number of bytes between two attribute entries, this is, at
+  // least size of the data types times number of components.
+  int64_t byte_stride() const { return byte_stride_; }
+  // The offset where the attribute starts within the block of size byte_stride.
+  int64_t byte_offset() const { return byte_offset_; }
+  void set_byte_offset(int64_t byte_offset) { byte_offset_ = byte_offset; }
+  DataBufferDescriptor buffer_descriptor() const { return buffer_descriptor_; }
+  uint32_t unique_id() const { return unique_id_; }
+  void set_unique_id(uint32_t id) { unique_id_ = id; }
+
+ protected:
+  // Sets a new internal storage for the attribute.
+  void ResetBuffer(DataBuffer *buffer, int64_t byte_stride,
+                   int64_t byte_offset);
+
+ private:
+  // Function for conversion of an attribute to a specific output format given a
+  // format of the stored attribute.
+  // T is the stored attribute data type.
+  // OutT is the desired data type of the attribute.
+  template <typename T, typename OutT>
+  bool ConvertTypedValue(AttributeValueIndex att_id, int8_t out_num_components,
+                         OutT *out_value) const {
+    const uint8_t *src_address = GetAddress(att_id);
+
+    // Convert all components available in both the original and output formats.
+    for (int i = 0; i < std::min(num_components_, out_num_components); ++i) {
+      if (!IsAddressValid(src_address)) {
+        return false;
+      }
+      const T in_value = *reinterpret_cast<const T *>(src_address);
+
+      // Make sure the in_value fits within the range of values that OutT
+      // is able to represent. Perform the check only for integral types.
+      if (std::is_integral<T>::value && std::is_integral<OutT>::value) {
+        static constexpr OutT kOutMin =
+            std::is_signed<T>::value ? std::numeric_limits<OutT>::lowest() : 0;
+        if (in_value < kOutMin || in_value > std::numeric_limits<OutT>::max()) {
+          return false;
+        }
+      }
+
+      out_value[i] = static_cast<OutT>(in_value);
+      // When converting integer to floating point, normalize the value if
+      // necessary.
+      if (std::is_integral<T>::value && std::is_floating_point<OutT>::value &&
+          normalized_) {
+        out_value[i] /= static_cast<OutT>(std::numeric_limits<T>::max());
+      }
+      // TODO(ostava): Add handling of normalized attributes when converting
+      // between different integer representations. If the attribute is
+      // normalized, integer values should be converted as if they represent 0-1
+      // range. E.g. when we convert uint16 to uint8, the range <0, 2^16 - 1>
+      // should be converted to range <0, 2^8 - 1>.
+      src_address += sizeof(T);
+    }
+    // Fill empty data for unused output components if needed.
+    for (int i = num_components_; i < out_num_components; ++i) {
+      out_value[i] = static_cast<OutT>(0);
+    }
+    return true;
+  }
+
+  DataBuffer *buffer_;
+  // The buffer descriptor is stored at the time the buffer is attached to this
+  // attribute. The purpose is to detect if any changes happened to the buffer
+  // since the time it was attached.
+  DataBufferDescriptor buffer_descriptor_;
+  int8_t num_components_;
+  DataType data_type_;
+  bool normalized_;
+  int64_t byte_stride_;
+  int64_t byte_offset_;
+
+  Type attribute_type_;
+
+  // Unique id of this attribute. No two attributes could have the same unique
+  // id. It is used to identify each attribute, especially when there are
+  // multiple attribute of the same type in a point cloud.
+  uint32_t unique_id_;
+
+  friend struct GeometryAttributeHasher;
+};
+
+// Hashing support
+
+// Function object for using Attribute as a hash key.
+struct GeometryAttributeHasher {
+  size_t operator()(const GeometryAttribute &va) const {
+    size_t hash = HashCombine(va.buffer_descriptor_.buffer_id,
+                              va.buffer_descriptor_.buffer_update_count);
+    hash = HashCombine(va.num_components_, hash);
+    hash = HashCombine(static_cast<int8_t>(va.data_type_), hash);
+    hash = HashCombine(static_cast<int8_t>(va.attribute_type_), hash);
+    hash = HashCombine(va.byte_stride_, hash);
+    return HashCombine(va.byte_offset_, hash);
+  }
+};
+
+// Function object for using GeometryAttribute::Type as a hash key.
+struct GeometryAttributeTypeHasher {
+  size_t operator()(const GeometryAttribute::Type &at) const {
+    return static_cast<size_t>(at);
+  }
+};
+
+} // namespace draco
+
+#endif // DRACO_ATTRIBUTES_GEOMETRY_ATTRIBUTE_H_
diff --git a/contrib/draco/src/draco/attributes/geometry_indices.h b/contrib/draco/src/draco/attributes/geometry_indices.h
new file mode 100644
index 000000000..80e43e30a
--- /dev/null
+++ b/contrib/draco/src/draco/attributes/geometry_indices.h
@@ -0,0 +1,54 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_ATTRIBUTES_GEOMETRY_INDICES_H_
+#define DRACO_ATTRIBUTES_GEOMETRY_INDICES_H_
+
+#include <inttypes.h>
+
+#include <limits>
+
+#include "draco/core/draco_index_type.h"
+
+namespace draco {
+
+// Index of an attribute value entry stored in a GeometryAttribute.
+DEFINE_NEW_DRACO_INDEX_TYPE(uint32_t, AttributeValueIndex)
+// Index of a point in a PointCloud.
+DEFINE_NEW_DRACO_INDEX_TYPE(uint32_t, PointIndex)
+// Vertex index in a Mesh or CornerTable.
+DEFINE_NEW_DRACO_INDEX_TYPE(uint32_t, VertexIndex)
+// Corner index that identifies a corner in a Mesh or CornerTable.
+DEFINE_NEW_DRACO_INDEX_TYPE(uint32_t, CornerIndex)
+// Face index for Mesh and CornerTable.
+DEFINE_NEW_DRACO_INDEX_TYPE(uint32_t, FaceIndex)
+
+// Constants denoting invalid indices.
+static constexpr AttributeValueIndex kInvalidAttributeValueIndex(
+    std::numeric_limits<uint32_t>::max());
+static constexpr PointIndex kInvalidPointIndex(
+    std::numeric_limits<uint32_t>::max());
+static constexpr VertexIndex kInvalidVertexIndex(
+    std::numeric_limits<uint32_t>::max());
+static constexpr CornerIndex kInvalidCornerIndex(
+    std::numeric_limits<uint32_t>::max());
+static constexpr FaceIndex kInvalidFaceIndex(
+    std::numeric_limits<uint32_t>::max());
+
+// TODO(ostava): Add strongly typed indices for attribute id and unique
+// attribute id.
+
+} // namespace draco
+
+#endif // DRACO_ATTRIBUTES_GEOMETRY_INDICES_H_
diff --git a/contrib/draco/src/draco/attributes/point_attribute.cc b/contrib/draco/src/draco/attributes/point_attribute.cc
new file mode 100644
index 000000000..b28f860c1
--- /dev/null
+++ b/contrib/draco/src/draco/attributes/point_attribute.cc
@@ -0,0 +1,225 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/attributes/point_attribute.h"
+
+#include <unordered_map>
+
+using std::unordered_map;
+
+// Shortcut for typed conditionals.
+template <bool B, class T, class F>
+using conditional_t = typename std::conditional<B, T, F>::type;
+
+namespace draco {
+
+PointAttribute::PointAttribute()
+    : num_unique_entries_(0), identity_mapping_(false) {}
+
+PointAttribute::PointAttribute(const GeometryAttribute &att)
+    : GeometryAttribute(att),
+      num_unique_entries_(0),
+      identity_mapping_(false) {}
+
+void PointAttribute::Init(Type attribute_type, int8_t num_components,
+                          DataType data_type, bool normalized,
+                          size_t num_attribute_values) {
+  attribute_buffer_ = std::unique_ptr<DataBuffer>(new DataBuffer());
+  GeometryAttribute::Init(attribute_type, attribute_buffer_.get(),
+                          num_components, data_type, normalized,
+                          DataTypeLength(data_type) * num_components, 0);
+  Reset(num_attribute_values);
+  SetIdentityMapping();
+}
+
+void PointAttribute::CopyFrom(const PointAttribute &src_att) {
+  if (buffer() == nullptr) {
+    // If the destination attribute doesn't have a valid buffer, create it.
+    attribute_buffer_ = std::unique_ptr<DataBuffer>(new DataBuffer());
+    ResetBuffer(attribute_buffer_.get(), 0, 0);
+  }
+  if (!GeometryAttribute::CopyFrom(src_att)) {
+    return;
+  }
+  identity_mapping_ = src_att.identity_mapping_;
+  num_unique_entries_ = src_att.num_unique_entries_;
+  indices_map_ = src_att.indices_map_;
+  if (src_att.attribute_transform_data_) {
+    attribute_transform_data_ = std::unique_ptr<AttributeTransformData>(
+        new AttributeTransformData(*src_att.attribute_transform_data_));
+  } else {
+    attribute_transform_data_ = nullptr;
+  }
+}
+
+bool PointAttribute::Reset(size_t num_attribute_values) {
+  if (attribute_buffer_ == nullptr) {
+    attribute_buffer_ = std::unique_ptr<DataBuffer>(new DataBuffer());
+  }
+  const int64_t entry_size = DataTypeLength(data_type()) * num_components();
+  if (!attribute_buffer_->Update(nullptr, num_attribute_values * entry_size)) {
+    return false;
+  }
+  // Assign the new buffer to the parent attribute.
+  ResetBuffer(attribute_buffer_.get(), entry_size, 0);
+  num_unique_entries_ = static_cast<uint32_t>(num_attribute_values);
+  return true;
+}
+
+void PointAttribute::Resize(size_t new_num_unique_entries) {
+  num_unique_entries_ = static_cast<uint32_t>(new_num_unique_entries);
+  attribute_buffer_->Resize(new_num_unique_entries * byte_stride());
+}
+
+#ifdef DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED
+AttributeValueIndex::ValueType PointAttribute::DeduplicateValues(
+    const GeometryAttribute &in_att) {
+  return DeduplicateValues(in_att, AttributeValueIndex(0));
+}
+
+AttributeValueIndex::ValueType PointAttribute::DeduplicateValues(
+    const GeometryAttribute &in_att, AttributeValueIndex in_att_offset) {
+  AttributeValueIndex::ValueType unique_vals = 0;
+  switch (in_att.data_type()) {
+    // Currently we support only float, uint8, and uint16 arguments.
+    case DT_FLOAT32:
+      unique_vals = DeduplicateTypedValues<float>(in_att, in_att_offset);
+      break;
+    case DT_INT8:
+      unique_vals = DeduplicateTypedValues<int8_t>(in_att, in_att_offset);
+      break;
+    case DT_UINT8:
+    case DT_BOOL:
+      unique_vals = DeduplicateTypedValues<uint8_t>(in_att, in_att_offset);
+      break;
+    case DT_UINT16:
+      unique_vals = DeduplicateTypedValues<uint16_t>(in_att, in_att_offset);
+      break;
+    case DT_INT16:
+      unique_vals = DeduplicateTypedValues<int16_t>(in_att, in_att_offset);
+      break;
+    case DT_UINT32:
+      unique_vals = DeduplicateTypedValues<uint32_t>(in_att, in_att_offset);
+      break;
+    case DT_INT32:
+      unique_vals = DeduplicateTypedValues<int32_t>(in_att, in_att_offset);
+      break;
+    default:
+      return -1; // Unsupported data type.
+  }
+  if (unique_vals == 0) {
+    return -1; // Unexpected error.
+  }
+  return unique_vals;
+}
+
+// Helper function for calling UnifyDuplicateAttributes<T, num_components_t>
+// with the correct template arguments.
+// Returns the number of unique attribute values.
+template <typename T>
+AttributeValueIndex::ValueType PointAttribute::DeduplicateTypedValues(
+    const GeometryAttribute &in_att, AttributeValueIndex in_att_offset) {
+  // Select the correct method to call based on the number of attribute
+  // components.
+  switch (in_att.num_components()) {
+    case 1:
+      return DeduplicateFormattedValues<T, 1>(in_att, in_att_offset);
+    case 2:
+      return DeduplicateFormattedValues<T, 2>(in_att, in_att_offset);
+    case 3:
+      return DeduplicateFormattedValues<T, 3>(in_att, in_att_offset);
+    case 4:
+      return DeduplicateFormattedValues<T, 4>(in_att, in_att_offset);
+    default:
+      return 0;
+  }
+}
+
+template <typename T, int num_components_t>
+AttributeValueIndex::ValueType PointAttribute::DeduplicateFormattedValues(
+    const GeometryAttribute &in_att, AttributeValueIndex in_att_offset) {
+  // We want to detect duplicates using a hash map but we cannot hash floating
+  // point numbers directly so bit-copy floats to the same sized integers and
+  // hash them.
+
+  // First we need to determine which int type to use (1, 2, 4 or 8 bytes).
+  // Note, this is done at compile time using std::conditional struct.
+  // Conditional is in form <bool-expression, true, false>. If bool-expression
+  // is true the "true" branch is used and vice versa. All at compile time.
+  typedef conditional_t<sizeof(T) == 1, uint8_t,
+                        conditional_t<sizeof(T) == 2, uint16_t,
+                                      conditional_t<sizeof(T) == 4, uint32_t,
+                                                    uint64_t>>>
+      HashType;
+
+  AttributeValueIndex unique_vals(0);
+  typedef std::array<T, num_components_t> AttributeValue;
+  typedef std::array<HashType, num_components_t> AttributeHashableValue;
+  // Hash map storing index of the first attribute with a given value.
+  unordered_map<AttributeHashableValue, AttributeValueIndex,
+                HashArray<AttributeHashableValue>>
+      value_to_index_map;
+  AttributeValue att_value;
+  AttributeHashableValue hashable_value;
+  IndexTypeVector<AttributeValueIndex, AttributeValueIndex> value_map(
+      num_unique_entries_);
+  for (AttributeValueIndex i(0); i < num_unique_entries_; ++i) {
+    const AttributeValueIndex att_pos = i + in_att_offset;
+    att_value = in_att.GetValue<T, num_components_t>(att_pos);
+    // Convert the value to hashable type. Bit-copy real attributes to integers.
+    memcpy(&(hashable_value[0]), &(att_value[0]), sizeof(att_value));
+
+    // Check if the given attribute value has been used before already.
+    auto it = value_to_index_map.find(hashable_value);
+    if (it != value_to_index_map.end()) {
+      // Duplicated value found. Update index mapping.
+      value_map[i] = it->second;
+    } else {
+      // New unique value.
+      // Update the hash map with a new entry pointing to the latest unique
+      // vertex index.
+      value_to_index_map.insert(
+          std::pair<AttributeHashableValue, AttributeValueIndex>(hashable_value,
+                                                                 unique_vals));
+      // Add the unique value to the mesh builder.
+      SetAttributeValue(unique_vals, &att_value);
+      // Update index mapping.
+      value_map[i] = unique_vals;
+
+      ++unique_vals;
+    }
+  }
+  if (unique_vals == num_unique_entries_) {
+    return unique_vals.value(); // Nothing has changed.
+  }
+  if (is_mapping_identity()) {
+    // Change identity mapping to the explicit one.
+    // The number of points is equal to the number of old unique values.
+    SetExplicitMapping(num_unique_entries_);
+    // Update the explicit map.
+    for (uint32_t i = 0; i < num_unique_entries_; ++i) {
+      SetPointMapEntry(PointIndex(i), value_map[AttributeValueIndex(i)]);
+    }
+  } else {
+    // Update point to value map using the mapping between old and new values.
+    for (PointIndex i(0); i < static_cast<uint32_t>(indices_map_.size()); ++i) {
+      SetPointMapEntry(i, value_map[indices_map_[i]]);
+    }
+  }
+  num_unique_entries_ = unique_vals.value();
+  return num_unique_entries_;
+}
+#endif
+
+} // namespace draco
diff --git a/contrib/draco/src/draco/attributes/point_attribute.h b/contrib/draco/src/draco/attributes/point_attribute.h
new file mode 100644
index 000000000..ee3662031
--- /dev/null
+++ b/contrib/draco/src/draco/attributes/point_attribute.h
@@ -0,0 +1,190 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_ATTRIBUTES_POINT_ATTRIBUTE_H_ +#define DRACO_ATTRIBUTES_POINT_ATTRIBUTE_H_ + +#include + +#include "draco/attributes/attribute_transform_data.h" +#include "draco/attributes/geometry_attribute.h" +#include "draco/core/draco_index_type_vector.h" +#include "draco/core/hash_utils.h" +#include "draco/core/macros.h" +#include "draco/draco_features.h" + +namespace draco { + +// Class for storing point specific data about each attribute. In general, +// multiple points stored in a point cloud can share the same attribute value +// and this class provides the necessary mapping between point ids and attribute +// value ids. +class PointAttribute : public GeometryAttribute { + public: + PointAttribute(); + explicit PointAttribute(const GeometryAttribute &att); + + // Make sure the move constructor is defined (needed for better performance + // when new attributes are added to PointCloud). + PointAttribute(PointAttribute &&attribute) = default; + PointAttribute &operator=(PointAttribute &&attribute) = default; + + // Initializes a point attribute. By default the attribute will be set to + // identity mapping between point indices and attribute values. To set custom + // mapping use SetExplicitMapping() function. + void Init(Type attribute_type, int8_t num_components, DataType data_type, + bool normalized, size_t num_attribute_values); + + // Copies attribute data from the provided |src_att| attribute. + void CopyFrom(const PointAttribute &src_att); + + // Prepares the attribute storage for the specified number of entries. + bool Reset(size_t num_attribute_values); + + size_t size() const { return num_unique_entries_; } + AttributeValueIndex mapped_index(PointIndex point_index) const { + if (identity_mapping_) { + return AttributeValueIndex(point_index.value()); + } + return indices_map_[point_index]; + } + DataBuffer *buffer() const { return attribute_buffer_.get(); } + bool is_mapping_identity() const { return identity_mapping_; } + size_t indices_map_size() const { + if (is_mapping_identity()) { + return 0; + } + return indices_map_.size(); + } + + const uint8_t *GetAddressOfMappedIndex(PointIndex point_index) const { + return GetAddress(mapped_index(point_index)); + } + + // Sets the new number of unique attribute entries for the attribute. The + // function resizes the attribute storage to hold |num_attribute_values| + // entries. + // All previous entries with AttributeValueIndex < |num_attribute_values| + // are preserved. Caller needs to ensure that the PointAttribute is still + // valid after the resizing operation (that is, each point is mapped to a + // valid attribute value). + void Resize(size_t new_num_unique_entries); + + // Functions for setting the type of mapping between point indices and + // attribute entry ids. + // This function sets the mapping to implicit, where point indices are equal + // to attribute entry indices. + void SetIdentityMapping() { + identity_mapping_ = true; + indices_map_.clear(); + } + // This function sets the mapping to be explicitly using the indices_map_ + // array that needs to be initialized by the caller. 
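+  // Illustrative use (sketch added for clarity, not part of the upstream
+  // file):
+  //   att.SetExplicitMapping(num_points);
+  //   att.SetPointMapEntry(PointIndex(0), AttributeValueIndex(0));
+  // Every point must be mapped to a valid attribute value before the attribute
+  // is used.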
+ void SetExplicitMapping(size_t num_points) { + identity_mapping_ = false; + indices_map_.resize(num_points, kInvalidAttributeValueIndex); + } + + // Set an explicit map entry for a specific point index. + void SetPointMapEntry(PointIndex point_index, + AttributeValueIndex entry_index) { + DRACO_DCHECK(!identity_mapping_); + indices_map_[point_index] = entry_index; + } + + // Same as GeometryAttribute::GetValue(), but using point id as the input. + // Mapping to attribute value index is performed automatically. + void GetMappedValue(PointIndex point_index, void *out_data) const { + return GetValue(mapped_index(point_index), out_data); + } + +#ifdef DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED + // Deduplicate |in_att| values into |this| attribute. |in_att| can be equal + // to |this|. + // Returns -1 if the deduplication failed. + AttributeValueIndex::ValueType DeduplicateValues( + const GeometryAttribute &in_att); + + // Same as above but the values read from |in_att| are sampled with the + // provided offset |in_att_offset|. + AttributeValueIndex::ValueType DeduplicateValues( + const GeometryAttribute &in_att, AttributeValueIndex in_att_offset); +#endif + + // Set attribute transform data for the attribute. The data is used to store + // the type and parameters of the transform that is applied on the attribute + // data (optional). + void SetAttributeTransformData( + std::unique_ptr transform_data) { + attribute_transform_data_ = std::move(transform_data); + } + const AttributeTransformData *GetAttributeTransformData() const { + return attribute_transform_data_.get(); + } + + private: +#ifdef DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED + template + AttributeValueIndex::ValueType DeduplicateTypedValues( + const GeometryAttribute &in_att, AttributeValueIndex in_att_offset); + template + AttributeValueIndex::ValueType DeduplicateFormattedValues( + const GeometryAttribute &in_att, AttributeValueIndex in_att_offset); +#endif + + // Data storage for attribute values. GeometryAttribute itself doesn't own its + // buffer so we need to allocate it here. + std::unique_ptr attribute_buffer_; + + // Mapping between point ids and attribute value ids. + IndexTypeVector indices_map_; + AttributeValueIndex::ValueType num_unique_entries_; + // Flag when the mapping between point ids and attribute values is identity. + bool identity_mapping_; + + // If an attribute contains transformed data (e.g. quantized), we can specify + // the attribute transform here and use it to transform the attribute back to + // its original format. + std::unique_ptr attribute_transform_data_; + + friend struct PointAttributeHasher; +}; + +// Hash functor for the PointAttribute class. 
+struct PointAttributeHasher { + size_t operator()(const PointAttribute &attribute) const { + GeometryAttributeHasher base_hasher; + size_t hash = base_hasher(attribute); + hash = HashCombine(attribute.identity_mapping_, hash); + hash = HashCombine(attribute.num_unique_entries_, hash); + hash = HashCombine(attribute.indices_map_.size(), hash); + if (!attribute.indices_map_.empty()) { + const uint64_t indices_hash = FingerprintString( + reinterpret_cast(attribute.indices_map_.data()), + attribute.indices_map_.size()); + hash = HashCombine(indices_hash, hash); + } + if (attribute.attribute_buffer_ != nullptr) { + const uint64_t buffer_hash = FingerprintString( + reinterpret_cast(attribute.attribute_buffer_->data()), + attribute.attribute_buffer_->data_size()); + hash = HashCombine(buffer_hash, hash); + } + return hash; + } +}; + +} // namespace draco + +#endif // DRACO_ATTRIBUTES_POINT_ATTRIBUTE_H_ diff --git a/contrib/draco/src/draco/attributes/point_attribute_test.cc b/contrib/draco/src/draco/attributes/point_attribute_test.cc new file mode 100644 index 000000000..4ae23fb3c --- /dev/null +++ b/contrib/draco/src/draco/attributes/point_attribute_test.cc @@ -0,0 +1,128 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/attributes/point_attribute.h" + +#include "draco/core/draco_test_base.h" + +namespace { + +class PointAttributeTest : public ::testing::Test { + protected: + PointAttributeTest() {} +}; + +TEST_F(PointAttributeTest, TestCopy) { + // This test verifies that PointAttribute can copy data from another point + // attribute. + draco::PointAttribute pa; + pa.Init(draco::GeometryAttribute::POSITION, 1, draco::DT_INT32, false, 10); + + for (int32_t i = 0; i < 10; ++i) { + pa.SetAttributeValue(draco::AttributeValueIndex(i), &i); + } + + pa.set_unique_id(12); + + draco::PointAttribute other_pa; + other_pa.CopyFrom(pa); + + draco::PointAttributeHasher hasher; + ASSERT_EQ(hasher(pa), hasher(other_pa)); + ASSERT_EQ(pa.unique_id(), other_pa.unique_id()); + + // The hash function does not actually compute the hash from attribute values, + // so ensure the data got copied correctly as well. 
+ for (int32_t i = 0; i < 10; ++i) { + int32_t data; + other_pa.GetValue(draco::AttributeValueIndex(i), &data); + ASSERT_EQ(data, i); + } +} + +TEST_F(PointAttributeTest, TestGetValueFloat) { + draco::PointAttribute pa; + pa.Init(draco::GeometryAttribute::POSITION, 3, draco::DT_FLOAT32, false, 5); + float points[3]; + for (int32_t i = 0; i < 5; ++i) { + points[0] = i * 3.0; + points[1] = (i * 3.0) + 1.0; + points[2] = (i * 3.0) + 2.0; + pa.SetAttributeValue(draco::AttributeValueIndex(i), &points); + } + + for (int32_t i = 0; i < 5; ++i) { + pa.GetValue(draco::AttributeValueIndex(i), &points); + ASSERT_FLOAT_EQ(points[0], i * 3.0); + ASSERT_FLOAT_EQ(points[1], (i * 3.0) + 1.0); + ASSERT_FLOAT_EQ(points[2], (i * 3.0) + 2.0); + } +} + +TEST_F(PointAttributeTest, TestGetArray) { + draco::PointAttribute pa; + pa.Init(draco::GeometryAttribute::POSITION, 3, draco::DT_FLOAT32, false, 5); + float points[3]; + for (int32_t i = 0; i < 5; ++i) { + points[0] = i * 3.0; + points[1] = (i * 3.0) + 1.0; + points[2] = (i * 3.0) + 2.0; + pa.SetAttributeValue(draco::AttributeValueIndex(i), &points); + } + + for (int32_t i = 0; i < 5; ++i) { + std::array att_value; + att_value = pa.GetValue(draco::AttributeValueIndex(i)); + ASSERT_FLOAT_EQ(att_value[0], i * 3.0); + ASSERT_FLOAT_EQ(att_value[1], (i * 3.0) + 1.0); + ASSERT_FLOAT_EQ(att_value[2], (i * 3.0) + 2.0); + } + for (int32_t i = 0; i < 5; ++i) { + std::array att_value; + EXPECT_TRUE( + (pa.GetValue(draco::AttributeValueIndex(i), &att_value))); + ASSERT_FLOAT_EQ(att_value[0], i * 3.0); + ASSERT_FLOAT_EQ(att_value[1], (i * 3.0) + 1.0); + ASSERT_FLOAT_EQ(att_value[2], (i * 3.0) + 2.0); + } +} + +TEST_F(PointAttributeTest, TestArrayReadError) { + draco::PointAttribute pa; + pa.Init(draco::GeometryAttribute::POSITION, 3, draco::DT_FLOAT32, false, 5); + float points[3]; + for (int32_t i = 0; i < 5; ++i) { + points[0] = i * 3.0; + points[1] = (i * 3.0) + 1.0; + points[2] = (i * 3.0) + 2.0; + pa.SetAttributeValue(draco::AttributeValueIndex(i), &points); + } + + std::array att_value; + EXPECT_FALSE( + (pa.GetValue(draco::AttributeValueIndex(5), &att_value))); +} + +TEST_F(PointAttributeTest, TestResize) { + draco::PointAttribute pa; + pa.Init(draco::GeometryAttribute::POSITION, 3, draco::DT_FLOAT32, false, 5); + ASSERT_EQ(pa.size(), 5); + ASSERT_EQ(pa.buffer()->data_size(), 4 * 3 * 5); + + pa.Resize(10); + ASSERT_EQ(pa.size(), 10); + ASSERT_EQ(pa.buffer()->data_size(), 4 * 3 * 10); +} + +} // namespace diff --git a/contrib/draco/src/draco/compression/attributes/attributes_decoder.cc b/contrib/draco/src/draco/compression/attributes/attributes_decoder.cc new file mode 100644 index 000000000..007dd2f43 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/attributes_decoder.cc @@ -0,0 +1,127 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +#include "draco/compression/attributes/attributes_decoder.h" + +#include "draco/core/varint_decoding.h" + +namespace draco { + +AttributesDecoder::AttributesDecoder() + : point_cloud_decoder_(nullptr), point_cloud_(nullptr) {} + +bool AttributesDecoder::Init(PointCloudDecoder *decoder, PointCloud *pc) { + point_cloud_decoder_ = decoder; + point_cloud_ = pc; + return true; +} + +bool AttributesDecoder::DecodeAttributesDecoderData(DecoderBuffer *in_buffer) { + // Decode and create attributes. + uint32_t num_attributes; +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED + if (point_cloud_decoder_->bitstream_version() < + DRACO_BITSTREAM_VERSION(2, 0)) { + if (!in_buffer->Decode(&num_attributes)) { + return false; + } + } else +#endif + { + if (!DecodeVarint(&num_attributes, in_buffer)) { + return false; + } + } + + // Check that decoded number of attributes is valid. + if (num_attributes == 0) { + return false; + } + if (num_attributes > 5 * in_buffer->remaining_size()) { + // The decoded number of attributes is unreasonably high, because at least + // five bytes of attribute descriptor data per attribute are expected. + return false; + } + + // Decode attribute descriptor data. + point_attribute_ids_.resize(num_attributes); + PointCloud *pc = point_cloud_; + for (uint32_t i = 0; i < num_attributes; ++i) { + // Decode attribute descriptor data. + uint8_t att_type, data_type, num_components, normalized; + if (!in_buffer->Decode(&att_type)) { + return false; + } + if (!in_buffer->Decode(&data_type)) { + return false; + } + if (!in_buffer->Decode(&num_components)) { + return false; + } + if (!in_buffer->Decode(&normalized)) { + return false; + } + if (att_type >= GeometryAttribute::NAMED_ATTRIBUTES_COUNT) { + return false; + } + if (data_type == DT_INVALID || data_type >= DT_TYPES_COUNT) { + return false; + } + + // Check decoded attribute descriptor data. + if (num_components == 0) { + return false; + } + + // Add the attribute to the point cloud. + const DataType draco_dt = static_cast(data_type); + GeometryAttribute ga; + ga.Init(static_cast(att_type), nullptr, + num_components, draco_dt, normalized > 0, + DataTypeLength(draco_dt) * num_components, 0); + uint32_t unique_id; +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED + if (point_cloud_decoder_->bitstream_version() < + DRACO_BITSTREAM_VERSION(1, 3)) { + uint16_t custom_id; + if (!in_buffer->Decode(&custom_id)) { + return false; + } + // TODO(draco-eng): Add "custom_id" to attribute metadata. + unique_id = static_cast(custom_id); + ga.set_unique_id(unique_id); + } else +#endif + { + if (!DecodeVarint(&unique_id, in_buffer)) { + return false; + } + ga.set_unique_id(unique_id); + } + const int att_id = pc->AddAttribute( + std::unique_ptr(new PointAttribute(ga))); + pc->attribute(att_id)->set_unique_id(unique_id); + point_attribute_ids_[i] = att_id; + + // Update the inverse map. + if (att_id >= + static_cast(point_attribute_to_local_id_map_.size())) { + point_attribute_to_local_id_map_.resize(att_id + 1, -1); + } + point_attribute_to_local_id_map_[att_id] = i; + } + return true; +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/attributes/attributes_decoder.h b/contrib/draco/src/draco/compression/attributes/attributes_decoder.h new file mode 100644 index 000000000..5b2bb2cfe --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/attributes_decoder.h @@ -0,0 +1,97 @@ +// Copyright 2016 The Draco Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_DECODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_DECODER_H_ + +#include + +#include "draco/compression/attributes/attributes_decoder_interface.h" +#include "draco/compression/point_cloud/point_cloud_decoder.h" +#include "draco/core/decoder_buffer.h" +#include "draco/draco_features.h" +#include "draco/point_cloud/point_cloud.h" + +namespace draco { + +// Base class for decoding one or more attributes that were encoded with a +// matching AttributesEncoder. It is a basic implementation of +// AttributesDecoderInterface that provides functionality that is shared between +// all AttributesDecoders. +class AttributesDecoder : public AttributesDecoderInterface { + public: + AttributesDecoder(); + virtual ~AttributesDecoder() = default; + + // Called after all attribute decoders are created. It can be used to perform + // any custom initialization. + bool Init(PointCloudDecoder *decoder, PointCloud *pc) override; + + // Decodes any attribute decoder specific data from the |in_buffer|. + bool DecodeAttributesDecoderData(DecoderBuffer *in_buffer) override; + + int32_t GetAttributeId(int i) const override { + return point_attribute_ids_[i]; + } + int32_t GetNumAttributes() const override { + return static_cast(point_attribute_ids_.size()); + } + PointCloudDecoder *GetDecoder() const override { + return point_cloud_decoder_; + } + + // Decodes attribute data from the source buffer. + bool DecodeAttributes(DecoderBuffer *in_buffer) override { + if (!DecodePortableAttributes(in_buffer)) { + return false; + } + if (!DecodeDataNeededByPortableTransforms(in_buffer)) { + return false; + } + if (!TransformAttributesToOriginalFormat()) { + return false; + } + return true; + } + + protected: + int32_t GetLocalIdForPointAttribute(int32_t point_attribute_id) const { + const int id_map_size = + static_cast(point_attribute_to_local_id_map_.size()); + if (point_attribute_id >= id_map_size) { + return -1; + } + return point_attribute_to_local_id_map_[point_attribute_id]; + } + virtual bool DecodePortableAttributes(DecoderBuffer *in_buffer) = 0; + virtual bool DecodeDataNeededByPortableTransforms(DecoderBuffer *in_buffer) { + return true; + } + virtual bool TransformAttributesToOriginalFormat() { return true; } + + private: + // List of attribute ids that need to be decoded with this decoder. + std::vector point_attribute_ids_; + + // Map between point attribute id and the local id (i.e., the inverse of the + // |point_attribute_ids_|. 
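+  // For example (illustration added for clarity, not part of the upstream
+  // file): if |point_attribute_ids_| is {5, 2}, this map would hold
+  // {-1, -1, 1, -1, -1, 0}.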
+ std::vector point_attribute_to_local_id_map_; + + PointCloudDecoder *point_cloud_decoder_; + PointCloud *point_cloud_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/attributes_decoder_interface.h b/contrib/draco/src/draco/compression/attributes/attributes_decoder_interface.h new file mode 100644 index 000000000..8e5cf52ac --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/attributes_decoder_interface.h @@ -0,0 +1,62 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_DECODER_INTERFACE_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_DECODER_INTERFACE_H_ + +#include + +#include "draco/core/decoder_buffer.h" +#include "draco/point_cloud/point_cloud.h" + +namespace draco { + +class PointCloudDecoder; + +// Interface class for decoding one or more attributes that were encoded with a +// matching AttributesEncoder. It provides only the basic interface +// that is used by the PointCloudDecoder. The actual decoding must be +// implemented in derived classes using the DecodeAttributes() method. +class AttributesDecoderInterface { + public: + AttributesDecoderInterface() = default; + virtual ~AttributesDecoderInterface() = default; + + // Called after all attribute decoders are created. It can be used to perform + // any custom initialization. + virtual bool Init(PointCloudDecoder *decoder, PointCloud *pc) = 0; + + // Decodes any attribute decoder specific data from the |in_buffer|. + virtual bool DecodeAttributesDecoderData(DecoderBuffer *in_buffer) = 0; + + // Decode attribute data from the source buffer. Needs to be implemented by + // the derived classes. + virtual bool DecodeAttributes(DecoderBuffer *in_buffer) = 0; + + virtual int32_t GetAttributeId(int i) const = 0; + virtual int32_t GetNumAttributes() const = 0; + virtual PointCloudDecoder *GetDecoder() const = 0; + + // Returns an attribute containing data processed by the attribute transform. + // (see TransformToPortableFormat() method). This data is guaranteed to be + // same for encoder and decoder and it can be used by predictors. + virtual const PointAttribute *GetPortableAttribute( + int32_t /* point_attribute_id */) { + return nullptr; + } +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_DECODER_INTERFACE_H_ diff --git a/contrib/draco/src/draco/compression/attributes/attributes_encoder.cc b/contrib/draco/src/draco/compression/attributes/attributes_encoder.cc new file mode 100644 index 000000000..797c62f30 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/attributes_encoder.cc @@ -0,0 +1,49 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/attributes/attributes_encoder.h" + +#include "draco/core/varint_encoding.h" + +namespace draco { + +AttributesEncoder::AttributesEncoder() + : point_cloud_encoder_(nullptr), point_cloud_(nullptr) {} + +AttributesEncoder::AttributesEncoder(int att_id) : AttributesEncoder() { + AddAttributeId(att_id); +} + +bool AttributesEncoder::Init(PointCloudEncoder *encoder, const PointCloud *pc) { + point_cloud_encoder_ = encoder; + point_cloud_ = pc; + return true; +} + +bool AttributesEncoder::EncodeAttributesEncoderData(EncoderBuffer *out_buffer) { + // Encode data about all attributes. + EncodeVarint(num_attributes(), out_buffer); + for (uint32_t i = 0; i < num_attributes(); ++i) { + const int32_t att_id = point_attribute_ids_[i]; + const PointAttribute *const pa = point_cloud_->attribute(att_id); + out_buffer->Encode(static_cast(pa->attribute_type())); + out_buffer->Encode(static_cast(pa->data_type())); + out_buffer->Encode(static_cast(pa->num_components())); + out_buffer->Encode(static_cast(pa->normalized())); + EncodeVarint(pa->unique_id(), out_buffer); + } + return true; +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/attributes/attributes_encoder.h b/contrib/draco/src/draco/compression/attributes/attributes_encoder.h new file mode 100644 index 000000000..9de846ae6 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/attributes_encoder.h @@ -0,0 +1,154 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_ENCODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_ENCODER_H_ + +#include "draco/attributes/point_attribute.h" +#include "draco/core/encoder_buffer.h" +#include "draco/point_cloud/point_cloud.h" + +namespace draco { + +class PointCloudEncoder; + +// Base class for encoding one or more attributes of a PointCloud (or other +// geometry). This base class provides only the basic interface that is used +// by the PointCloudEncoder. +class AttributesEncoder { + public: + AttributesEncoder(); + // Constructs an attribute encoder associated with a given point attribute. + explicit AttributesEncoder(int point_attrib_id); + virtual ~AttributesEncoder() = default; + + // Called after all attribute encoders are created. It can be used to perform + // any custom initialization, including setting up attribute dependencies. + // Note: no data should be encoded in this function, because the decoder may + // process encoders in a different order from the decoder. 
+ virtual bool Init(PointCloudEncoder *encoder, const PointCloud *pc); + + // Encodes data needed by the target attribute decoder. + virtual bool EncodeAttributesEncoderData(EncoderBuffer *out_buffer); + + // Returns a unique identifier of the given encoder type, that is used during + // decoding to construct the corresponding attribute decoder. + virtual uint8_t GetUniqueId() const = 0; + + // Encode attribute data to the target buffer. + virtual bool EncodeAttributes(EncoderBuffer *out_buffer) { + if (!TransformAttributesToPortableFormat()) { + return false; + } + if (!EncodePortableAttributes(out_buffer)) { + return false; + } + // Encode data needed by portable transforms after the attribute is encoded. + // This corresponds to the order in which the data is going to be decoded by + // the decoder. + if (!EncodeDataNeededByPortableTransforms(out_buffer)) { + return false; + } + return true; + } + + // Returns the number of attributes that need to be encoded before the + // specified attribute is encoded. + // Note that the attribute is specified by its point attribute id. + virtual int NumParentAttributes(int32_t /* point_attribute_id */) const { + return 0; + } + + virtual int GetParentAttributeId(int32_t /* point_attribute_id */, + int32_t /* parent_i */) const { + return -1; + } + + // Marks a given attribute as a parent of another attribute. + virtual bool MarkParentAttribute(int32_t /* point_attribute_id */) { + return false; + } + + // Returns an attribute containing data processed by the attribute transform. + // (see TransformToPortableFormat() method). This data is guaranteed to be + // encoded losslessly and it can be safely used for predictors. + virtual const PointAttribute *GetPortableAttribute( + int32_t /* point_attribute_id */) { + return nullptr; + } + + void AddAttributeId(int32_t id) { + point_attribute_ids_.push_back(id); + if (id >= static_cast(point_attribute_to_local_id_map_.size())) { + point_attribute_to_local_id_map_.resize(id + 1, -1); + } + point_attribute_to_local_id_map_[id] = + static_cast(point_attribute_ids_.size()) - 1; + } + + // Sets new attribute point ids (replacing the existing ones). + void SetAttributeIds(const std::vector &point_attribute_ids) { + point_attribute_ids_.clear(); + point_attribute_to_local_id_map_.clear(); + for (int32_t att_id : point_attribute_ids) { + AddAttributeId(att_id); + } + } + + int32_t GetAttributeId(int i) const { return point_attribute_ids_[i]; } + uint32_t num_attributes() const { + return static_cast(point_attribute_ids_.size()); + } + PointCloudEncoder *encoder() const { return point_cloud_encoder_; } + + protected: + // Transforms the input attribute data into a form that should be losslessly + // encoded (transform itself can be lossy). + virtual bool TransformAttributesToPortableFormat() { return true; } + + // Losslessly encodes data of all portable attributes. + // Precondition: All attributes must have been transformed into portable + // format at this point (see TransformAttributesToPortableFormat() method). + virtual bool EncodePortableAttributes(EncoderBuffer *out_buffer) = 0; + + // Encodes any data needed to revert the transform to portable format for each + // attribute (e.g. data needed for dequantization of quantized values). 
+  virtual bool EncodeDataNeededByPortableTransforms(EncoderBuffer *out_buffer) {
+    return true;
+  }
+
+  int32_t GetLocalIdForPointAttribute(int32_t point_attribute_id) const {
+    const int id_map_size =
+        static_cast<int>(point_attribute_to_local_id_map_.size());
+    if (point_attribute_id >= id_map_size) {
+      return -1;
+    }
+    return point_attribute_to_local_id_map_[point_attribute_id];
+  }
+
+ private:
+  // List of attribute ids that need to be encoded with this encoder.
+  std::vector<int32_t> point_attribute_ids_;
+
+  // Map between point attribute id and the local id (i.e., the inverse of the
+  // |point_attribute_ids_|.
+  std::vector<int32_t> point_attribute_to_local_id_map_;
+
+  PointCloudEncoder *point_cloud_encoder_;
+  const PointCloud *point_cloud_;
+};
+
+}  // namespace draco
+
+#endif  // DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_ENCODER_H_
diff --git a/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_decoder.cc b/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_decoder.cc
new file mode 100644
index 000000000..e4d53485d
--- /dev/null
+++ b/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_decoder.cc
@@ -0,0 +1,556 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/attributes/kd_tree_attributes_decoder.h"
+
+#include "draco/compression/attributes/kd_tree_attributes_shared.h"
+#include "draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_decoder.h"
+#include "draco/compression/point_cloud/algorithms/float_points_tree_decoder.h"
+#include "draco/compression/point_cloud/point_cloud_decoder.h"
+#include "draco/core/draco_types.h"
+#include "draco/core/varint_decoding.h"
+
+namespace draco {
+
+// attribute, offset_dimensionality, data_type, data_size, num_components
+using AttributeTuple =
+    std::tuple<PointAttribute *, uint32_t, DataType, uint32_t, uint32_t>;
+
+// Output iterator that is used to decode values directly into the data buffer
+// of the modified PointAttribute.
+// The extension of this iterator beyond the DT_UINT32 concerns itself only with
+// the size of the data for efficiency, not the type. DataType is conveyed in
+// but is an unused field populated for any future logic/special casing.
+// DT_UINT32 and all other 4-byte types are naturally supported from the size of
+// data in the kd tree encoder. DT_UINT16 and DT_UINT8 are supported by way
+// of byte copies into a temporary memory buffer.
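+//
+// Example of the byte-copy path (a sketch, assuming a little-endian target):
+// for a 3-component DT_UINT16 attribute the core decoder hands this iterator
+// three uint32_t values per point, e.g. {12, 7, 300}; operator= copies the low
+// two bytes of each into the preallocated |memory_| buffer (0C 00 07 00 2C 01)
+// and that packed 6-byte entry is what gets written into the attribute.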
+template +class PointAttributeVectorOutputIterator { + typedef PointAttributeVectorOutputIterator Self; + + public: + PointAttributeVectorOutputIterator( + PointAttributeVectorOutputIterator &&that) = default; + + explicit PointAttributeVectorOutputIterator( + const std::vector &atts) + : attributes_(atts), point_id_(0) { + DRACO_DCHECK_GE(atts.size(), 1); + uint32_t required_decode_bytes = 0; + for (auto index = 0; index < attributes_.size(); index++) { + const AttributeTuple &att = attributes_[index]; + required_decode_bytes = (std::max)(required_decode_bytes, + std::get<3>(att) * std::get<4>(att)); + } + memory_.resize(required_decode_bytes); + data_ = memory_.data(); + } + + const Self &operator++() { + ++point_id_; + return *this; + } + + // We do not want to do ANY copying of this constructor so this particular + // operator is disabled for performance reasons. + // Self operator++(int) { + // Self copy = *this; + // ++point_id_; + // return copy; + // } + + Self &operator*() { return *this; } + // Still needed in some cases. + // TODO(hemmer): remove. + // hardcoded to 3 based on legacy usage. + const Self &operator=(const VectorD &val) { + DRACO_DCHECK_EQ(attributes_.size(), 1); // Expect only ONE attribute. + AttributeTuple &att = attributes_[0]; + PointAttribute *attribute = std::get<0>(att); + const uint32_t &offset = std::get<1>(att); + DRACO_DCHECK_EQ(offset, 0); // expected to be zero + attribute->SetAttributeValue(attribute->mapped_index(point_id_), + &val[0] + offset); + return *this; + } + // Additional operator taking std::vector as argument. + const Self &operator=(const std::vector &val) { + for (auto index = 0; index < attributes_.size(); index++) { + AttributeTuple &att = attributes_[index]; + PointAttribute *attribute = std::get<0>(att); + const uint32_t &offset = std::get<1>(att); + const uint32_t &data_size = std::get<3>(att); + const uint32_t &num_components = std::get<4>(att); + const uint32_t *data_source = val.data() + offset; + if (data_size < 4) { // handle uint16_t, uint8_t + // selectively copy data bytes + uint8_t *data_counter = data_; + for (uint32_t index = 0; index < num_components; + index += 1, data_counter += data_size) { + std::memcpy(data_counter, data_source + index, data_size); + } + // redirect to copied data + data_source = reinterpret_cast(data_); + } + const AttributeValueIndex avi = attribute->mapped_index(point_id_); + if (avi >= static_cast(attribute->size())) { + return *this; + } + attribute->SetAttributeValue(avi, data_source); + } + return *this; + } + + private: + // preallocated memory for buffering different data sizes. Never reallocated. + std::vector memory_; + uint8_t *data_; + std::vector attributes_; + PointIndex point_id_; + + // NO COPY + PointAttributeVectorOutputIterator( + const PointAttributeVectorOutputIterator &that) = delete; + PointAttributeVectorOutputIterator &operator=( + PointAttributeVectorOutputIterator const &) = delete; +}; + +KdTreeAttributesDecoder::KdTreeAttributesDecoder() {} + +bool KdTreeAttributesDecoder::DecodePortableAttributes( + DecoderBuffer *in_buffer) { + if (in_buffer->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 3)) { + // Old bitstream does everything in the + // DecodeDataNeededByPortableTransforms() method. + return true; + } + uint8_t compression_level = 0; + if (!in_buffer->Decode(&compression_level)) { + return false; + } + const int32_t num_points = GetDecoder()->point_cloud()->num_points(); + + // Decode data using the kd tree decoding into integer (portable) attributes. 
+ // We first need to go over all attributes and create a new portable storage + // for those attributes that need it (floating point attributes that have to + // be dequantized after decoding). + + const int num_attributes = GetNumAttributes(); + uint32_t total_dimensionality = 0; // position is a required dimension + std::vector atts(num_attributes); + + for (int i = 0; i < GetNumAttributes(); ++i) { + const int att_id = GetAttributeId(i); + PointAttribute *const att = GetDecoder()->point_cloud()->attribute(att_id); + // All attributes have the same number of values and identity mapping + // between PointIndex and AttributeValueIndex. + att->Reset(num_points); + att->SetIdentityMapping(); + + PointAttribute *target_att = nullptr; + if (att->data_type() == DT_UINT32 || att->data_type() == DT_UINT16 || + att->data_type() == DT_UINT8) { + // We can decode to these attributes directly. + target_att = att; + } else if (att->data_type() == DT_INT32 || att->data_type() == DT_INT16 || + att->data_type() == DT_INT8) { + // Prepare storage for data that is used to convert unsigned values back + // to the signed ones. + for (int c = 0; c < att->num_components(); ++c) { + min_signed_values_.push_back(0); + } + target_att = att; + } else if (att->data_type() == DT_FLOAT32) { + // Create a portable attribute that will hold the decoded data. We will + // dequantize the decoded data to the final attribute later on. + const int num_components = att->num_components(); + GeometryAttribute va; + va.Init(att->attribute_type(), nullptr, num_components, DT_UINT32, false, + num_components * DataTypeLength(DT_UINT32), 0); + std::unique_ptr port_att(new PointAttribute(va)); + port_att->SetIdentityMapping(); + port_att->Reset(num_points); + quantized_portable_attributes_.push_back(std::move(port_att)); + target_att = quantized_portable_attributes_.back().get(); + } else { + // Unsupported type. + return false; + } + // Add attribute to the output iterator used by the core algorithm. 
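+    // For example, a 3-component DT_FLOAT32 normal decoded through its
+    // quantized portable attribute ends up here as the tuple
+    // (portable attribute, running offset, DT_UINT32, 4, 3).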
+ const DataType data_type = target_att->data_type(); + const uint32_t data_size = (std::max)(0, DataTypeLength(data_type)); + const uint32_t num_components = target_att->num_components(); + atts[i] = std::make_tuple(target_att, total_dimensionality, data_type, + data_size, num_components); + total_dimensionality += num_components; + } + PointAttributeVectorOutputIterator out_it(atts); + + switch (compression_level) { + case 0: { + DynamicIntegerPointsKdTreeDecoder<0> decoder(total_dimensionality); + if (!decoder.DecodePoints(in_buffer, out_it)) { + return false; + } + break; + } + case 1: { + DynamicIntegerPointsKdTreeDecoder<1> decoder(total_dimensionality); + if (!decoder.DecodePoints(in_buffer, out_it)) { + return false; + } + break; + } + case 2: { + DynamicIntegerPointsKdTreeDecoder<2> decoder(total_dimensionality); + if (!decoder.DecodePoints(in_buffer, out_it)) { + return false; + } + break; + } + case 3: { + DynamicIntegerPointsKdTreeDecoder<3> decoder(total_dimensionality); + if (!decoder.DecodePoints(in_buffer, out_it)) { + return false; + } + break; + } + case 4: { + DynamicIntegerPointsKdTreeDecoder<4> decoder(total_dimensionality); + if (!decoder.DecodePoints(in_buffer, out_it)) { + return false; + } + break; + } + case 5: { + DynamicIntegerPointsKdTreeDecoder<5> decoder(total_dimensionality); + if (!decoder.DecodePoints(in_buffer, out_it)) { + return false; + } + break; + } + case 6: { + DynamicIntegerPointsKdTreeDecoder<6> decoder(total_dimensionality); + if (!decoder.DecodePoints(in_buffer, out_it)) { + return false; + } + break; + } + default: + return false; + } + return true; +} + +bool KdTreeAttributesDecoder::DecodeDataNeededByPortableTransforms( + DecoderBuffer *in_buffer) { + if (in_buffer->bitstream_version() >= DRACO_BITSTREAM_VERSION(2, 3)) { + // Decode quantization data for each attribute that need it. + // TODO(ostava): This should be moved to AttributeQuantizationTransform. + std::vector min_value; + for (int i = 0; i < GetNumAttributes(); ++i) { + const int att_id = GetAttributeId(i); + const PointAttribute *const att = + GetDecoder()->point_cloud()->attribute(att_id); + if (att->data_type() == DT_FLOAT32) { + const int num_components = att->num_components(); + min_value.resize(num_components); + if (!in_buffer->Decode(&min_value[0], sizeof(float) * num_components)) { + return false; + } + float max_value_dif; + if (!in_buffer->Decode(&max_value_dif)) { + return false; + } + uint8_t quantization_bits; + if (!in_buffer->Decode(&quantization_bits) || quantization_bits > 31) { + return false; + } + AttributeQuantizationTransform transform; + if (!transform.SetParameters(quantization_bits, min_value.data(), + num_components, max_value_dif)) { + return false; + } + const int num_transforms = + static_cast(attribute_quantization_transforms_.size()); + if (!transform.TransferToAttribute( + quantized_portable_attributes_[num_transforms].get())) { + return false; + } + attribute_quantization_transforms_.push_back(transform); + } + } + + // Decode transform data for signed integer attributes. 
+ for (int i = 0; i < min_signed_values_.size(); ++i) { + int32_t val; + if (!DecodeVarint(&val, in_buffer)) { + return false; + } + min_signed_values_[i] = val; + } + return true; + } +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED + // Handle old bitstream + // Figure out the total dimensionality of the point cloud + const uint32_t attribute_count = GetNumAttributes(); + uint32_t total_dimensionality = 0; // position is a required dimension + std::vector atts(attribute_count); + for (auto attribute_index = 0; + static_cast(attribute_index) < attribute_count; + attribute_index += 1) // increment the dimensionality as needed... + { + const int att_id = GetAttributeId(attribute_index); + PointAttribute *const att = GetDecoder()->point_cloud()->attribute(att_id); + const DataType data_type = att->data_type(); + const uint32_t data_size = (std::max)(0, DataTypeLength(data_type)); + const uint32_t num_components = att->num_components(); + if (data_size > 4) { + return false; + } + + atts[attribute_index] = std::make_tuple( + att, total_dimensionality, data_type, data_size, num_components); + // everything is treated as 32bit in the encoder. + total_dimensionality += num_components; + } + + const int att_id = GetAttributeId(0); + PointAttribute *const att = GetDecoder()->point_cloud()->attribute(att_id); + att->SetIdentityMapping(); + // Decode method + uint8_t method; + if (!in_buffer->Decode(&method)) { + return false; + } + if (method == KdTreeAttributesEncodingMethod::kKdTreeQuantizationEncoding) { + uint8_t compression_level = 0; + if (!in_buffer->Decode(&compression_level)) { + return false; + } + uint32_t num_points = 0; + if (!in_buffer->Decode(&num_points)) { + return false; + } + att->Reset(num_points); + FloatPointsTreeDecoder decoder; + decoder.set_num_points_from_header(num_points); + PointAttributeVectorOutputIterator out_it(atts); + if (!decoder.DecodePointCloud(in_buffer, out_it)) { + return false; + } + } else if (method == KdTreeAttributesEncodingMethod::kKdTreeIntegerEncoding) { + uint8_t compression_level = 0; + if (!in_buffer->Decode(&compression_level)) { + return false; + } + if (6 < compression_level) { + DRACO_LOGE( + "KdTreeAttributesDecoder: compression level %i not supported.\n", + compression_level); + return false; + } + + uint32_t num_points; + if (!in_buffer->Decode(&num_points)) { + return false; + } + + for (auto attribute_index = 0; + static_cast(attribute_index) < attribute_count; + attribute_index += 1) { + const int att_id = GetAttributeId(attribute_index); + PointAttribute *const attr = + GetDecoder()->point_cloud()->attribute(att_id); + attr->Reset(num_points); + attr->SetIdentityMapping(); + }; + + PointAttributeVectorOutputIterator out_it(atts); + + switch (compression_level) { + case 0: { + DynamicIntegerPointsKdTreeDecoder<0> decoder(total_dimensionality); + if (!decoder.DecodePoints(in_buffer, out_it)) { + return false; + } + break; + } + case 1: { + DynamicIntegerPointsKdTreeDecoder<1> decoder(total_dimensionality); + if (!decoder.DecodePoints(in_buffer, out_it)) { + return false; + } + break; + } + case 2: { + DynamicIntegerPointsKdTreeDecoder<2> decoder(total_dimensionality); + if (!decoder.DecodePoints(in_buffer, out_it)) { + return false; + } + break; + } + case 3: { + DynamicIntegerPointsKdTreeDecoder<3> decoder(total_dimensionality); + if (!decoder.DecodePoints(in_buffer, out_it)) { + return false; + } + break; + } + case 4: { + DynamicIntegerPointsKdTreeDecoder<4> decoder(total_dimensionality); + if (!decoder.DecodePoints(in_buffer, out_it)) 
{ + return false; + } + break; + } + case 5: { + DynamicIntegerPointsKdTreeDecoder<5> decoder(total_dimensionality); + if (!decoder.DecodePoints(in_buffer, out_it)) { + return false; + } + break; + } + case 6: { + DynamicIntegerPointsKdTreeDecoder<6> decoder(total_dimensionality); + if (!decoder.DecodePoints(in_buffer, out_it)) { + return false; + } + break; + } + default: + return false; + } + } else { + // Invalid method. + return false; + } + return true; +#else + return false; +#endif +} + +template +bool KdTreeAttributesDecoder::TransformAttributeBackToSignedType( + PointAttribute *att, int num_processed_signed_components) { + typedef typename std::make_unsigned::type UnsignedType; + std::vector unsigned_val(att->num_components()); + std::vector signed_val(att->num_components()); + + for (AttributeValueIndex avi(0); avi < static_cast(att->size()); + ++avi) { + att->GetValue(avi, &unsigned_val[0]); + for (int c = 0; c < att->num_components(); ++c) { + // Up-cast |unsigned_val| to int32_t to ensure we don't overflow it for + // smaller data types. + signed_val[c] = static_cast( + static_cast(unsigned_val[c]) + + min_signed_values_[num_processed_signed_components + c]); + } + att->SetAttributeValue(avi, &signed_val[0]); + } + return true; +} + +bool KdTreeAttributesDecoder::TransformAttributesToOriginalFormat() { + if (quantized_portable_attributes_.empty() && min_signed_values_.empty()) { + return true; + } + int num_processed_quantized_attributes = 0; + int num_processed_signed_components = 0; + // Dequantize attributes that needed it. + for (int i = 0; i < GetNumAttributes(); ++i) { + const int att_id = GetAttributeId(i); + PointAttribute *const att = GetDecoder()->point_cloud()->attribute(att_id); + if (att->data_type() == DT_INT32 || att->data_type() == DT_INT16 || + att->data_type() == DT_INT8) { + std::vector unsigned_val(att->num_components()); + std::vector signed_val(att->num_components()); + // Values are stored as unsigned in the attribute, make them signed again. + if (att->data_type() == DT_INT32) { + if (!TransformAttributeBackToSignedType( + att, num_processed_signed_components)) { + return false; + } + } else if (att->data_type() == DT_INT16) { + if (!TransformAttributeBackToSignedType( + att, num_processed_signed_components)) { + return false; + } + } else if (att->data_type() == DT_INT8) { + if (!TransformAttributeBackToSignedType( + att, num_processed_signed_components)) { + return false; + } + } + num_processed_signed_components += att->num_components(); + } else if (att->data_type() == DT_FLOAT32) { + // TODO(ostava): This code should be probably moved out to attribute + // transform and shared with the SequentialQuantizationAttributeDecoder. + + const PointAttribute *const src_att = + quantized_portable_attributes_[num_processed_quantized_attributes] + .get(); + + const AttributeQuantizationTransform &transform = + attribute_quantization_transforms_ + [num_processed_quantized_attributes]; + + num_processed_quantized_attributes++; + + if (GetDecoder()->options()->GetAttributeBool( + att->attribute_type(), "skip_attribute_transform", false)) { + // Attribute transform should not be performed. In this case, we replace + // the output geometry attribute with the portable attribute. + // TODO(ostava): We can potentially avoid this copy by introducing a new + // mechanism that would allow to use the final attributes as portable + // attributes for predictors that may need them. + att->CopyFrom(*src_att); + continue; + } + + // Convert all quantized values back to floats. 
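+      // Rough worked example (assuming the Dequantizer scales by
+      // range / max_quantized_value): with 8 quantization bits
+      // max_quantized_value is 255, so a stored value of 128 with range 2.0
+      // and min_value -1.0 maps back to about -1.0 + 128 * 2.0 / 255 ≈ 0.004.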
+ const int32_t max_quantized_value = + (1u << static_cast(transform.quantization_bits())) - 1; + const int num_components = att->num_components(); + const int entry_size = sizeof(float) * num_components; + const std::unique_ptr att_val(new float[num_components]); + int quant_val_id = 0; + int out_byte_pos = 0; + Dequantizer dequantizer; + if (!dequantizer.Init(transform.range(), max_quantized_value)) { + return false; + } + const uint32_t *const portable_attribute_data = + reinterpret_cast( + src_att->GetAddress(AttributeValueIndex(0))); + for (uint32_t i = 0; i < src_att->size(); ++i) { + for (int c = 0; c < num_components; ++c) { + float value = dequantizer.DequantizeFloat( + portable_attribute_data[quant_val_id++]); + value = value + transform.min_value(c); + att_val[c] = value; + } + // Store the floating point value into the attribute buffer. + att->buffer()->Write(out_byte_pos, att_val.get(), entry_size); + out_byte_pos += entry_size; + } + } + } + return true; +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_decoder.h b/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_decoder.h new file mode 100644 index 000000000..87338d6b0 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_decoder.h @@ -0,0 +1,46 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_DECODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_DECODER_H_ + +#include "draco/attributes/attribute_quantization_transform.h" +#include "draco/compression/attributes/attributes_decoder.h" + +namespace draco { + +// Decodes attributes encoded with the KdTreeAttributesEncoder. +class KdTreeAttributesDecoder : public AttributesDecoder { + public: + KdTreeAttributesDecoder(); + + protected: + bool DecodePortableAttributes(DecoderBuffer *in_buffer) override; + bool DecodeDataNeededByPortableTransforms(DecoderBuffer *in_buffer) override; + bool TransformAttributesToOriginalFormat() override; + + private: + template + bool TransformAttributeBackToSignedType(PointAttribute *att, + int num_processed_signed_components); + + std::vector + attribute_quantization_transforms_; + std::vector min_signed_values_; + std::vector> quantized_portable_attributes_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_encoder.cc b/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_encoder.cc new file mode 100644 index 000000000..b70deb9e0 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_encoder.cc @@ -0,0 +1,305 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/attributes/kd_tree_attributes_encoder.h" + +#include "draco/compression/attributes/kd_tree_attributes_shared.h" +#include "draco/compression/attributes/point_d_vector.h" +#include "draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_encoder.h" +#include "draco/compression/point_cloud/algorithms/float_points_tree_encoder.h" +#include "draco/compression/point_cloud/point_cloud_encoder.h" +#include "draco/core/varint_encoding.h" + +namespace draco { + +KdTreeAttributesEncoder::KdTreeAttributesEncoder() : num_components_(0) {} + +KdTreeAttributesEncoder::KdTreeAttributesEncoder(int att_id) + : AttributesEncoder(att_id), num_components_(0) {} + +bool KdTreeAttributesEncoder::TransformAttributesToPortableFormat() { + // Convert any of the input attributes into a format that can be processed by + // the kd tree encoder (quantization of floating attributes for now). + const size_t num_points = encoder()->point_cloud()->num_points(); + int num_components = 0; + for (uint32_t i = 0; i < num_attributes(); ++i) { + const int att_id = GetAttributeId(i); + const PointAttribute *const att = + encoder()->point_cloud()->attribute(att_id); + num_components += att->num_components(); + } + num_components_ = num_components; + + // Go over all attributes and quantize them if needed. + for (uint32_t i = 0; i < num_attributes(); ++i) { + const int att_id = GetAttributeId(i); + const PointAttribute *const att = + encoder()->point_cloud()->attribute(att_id); + if (att->data_type() == DT_FLOAT32) { + // Quantization path. + AttributeQuantizationTransform attribute_quantization_transform; + const int quantization_bits = encoder()->options()->GetAttributeInt( + att_id, "quantization_bits", -1); + if (quantization_bits < 1) { + return false; + } + if (encoder()->options()->IsAttributeOptionSet(att_id, + "quantization_origin") && + encoder()->options()->IsAttributeOptionSet(att_id, + "quantization_range")) { + // Quantization settings are explicitly specified in the provided + // options. + std::vector quantization_origin(att->num_components()); + encoder()->options()->GetAttributeVector(att_id, "quantization_origin", + att->num_components(), + &quantization_origin[0]); + const float range = encoder()->options()->GetAttributeFloat( + att_id, "quantization_range", 1.f); + attribute_quantization_transform.SetParameters( + quantization_bits, quantization_origin.data(), + att->num_components(), range); + } else { + // Compute quantization settings from the attribute values. + if (!attribute_quantization_transform.ComputeParameters( + *att, quantization_bits)) { + return false; + } + } + attribute_quantization_transforms_.push_back( + attribute_quantization_transform); + // Store the quantized attribute in an array that will be used when we do + // the actual encoding of the data. 
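+      // E.g. a position attribute quantized to 11 bits is stored here as a
+      // 3-component integer portable attribute with values in [0, 2047]
+      // (2^11 - 1), mirroring the DT_UINT32 portable attributes created on
+      // the decoder side.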
+ auto portable_att = + attribute_quantization_transform.InitTransformedAttribute(*att, + num_points); + attribute_quantization_transform.TransformAttribute(*att, {}, + portable_att.get()); + quantized_portable_attributes_.push_back(std::move(portable_att)); + } else if (att->data_type() == DT_INT32 || att->data_type() == DT_INT16 || + att->data_type() == DT_INT8) { + // For signed types, find the minimum value for each component. These + // values are going to be used to transform the attribute values to + // unsigned integers that can be processed by the core kd tree algorithm. + std::vector min_value(att->num_components(), + std::numeric_limits::max()); + std::vector act_value(att->num_components()); + for (AttributeValueIndex avi(0); avi < static_cast(att->size()); + ++avi) { + att->ConvertValue(avi, &act_value[0]); + for (int c = 0; c < att->num_components(); ++c) { + if (min_value[c] > act_value[c]) { + min_value[c] = act_value[c]; + } + } + } + for (int c = 0; c < att->num_components(); ++c) { + min_signed_values_.push_back(min_value[c]); + } + } + } + return true; +} + +bool KdTreeAttributesEncoder::EncodeDataNeededByPortableTransforms( + EncoderBuffer *out_buffer) { + // Store quantization settings for all attributes that need it. + for (int i = 0; i < attribute_quantization_transforms_.size(); ++i) { + attribute_quantization_transforms_[i].EncodeParameters(out_buffer); + } + + // Encode data needed for transforming signed integers to unsigned ones. + for (int i = 0; i < min_signed_values_.size(); ++i) { + EncodeVarint(min_signed_values_[i], out_buffer); + } + return true; +} + +bool KdTreeAttributesEncoder::EncodePortableAttributes( + EncoderBuffer *out_buffer) { + // Encode the data using the kd tree encoder algorithm. The data is first + // copied to a PointDVector that provides all the API expected by the core + // encoding algorithm. + + // We limit the maximum value of compression_level to 6 as we don't currently + // have viable algorithms for higher compression levels. + uint8_t compression_level = + std::min(10 - encoder()->options()->GetSpeed(), 6); + DRACO_DCHECK_LE(compression_level, 6); + + if (compression_level == 6 && num_components_ > 15) { + // Don't use compression level for CL >= 6. Axis selection is currently + // encoded using 4 bits. + compression_level = 5; + } + + out_buffer->Encode(compression_level); + + // Init PointDVector. The number of dimensions is equal to the total number + // of dimensions across all attributes. + const int num_points = encoder()->point_cloud()->num_points(); + PointDVector point_vector(num_points, num_components_); + + int num_processed_components = 0; + int num_processed_quantized_attributes = 0; + int num_processed_signed_components = 0; + // Copy data to the point vector. + for (uint32_t i = 0; i < num_attributes(); ++i) { + const int att_id = GetAttributeId(i); + const PointAttribute *const att = + encoder()->point_cloud()->attribute(att_id); + const PointAttribute *source_att = nullptr; + if (att->data_type() == DT_UINT32 || att->data_type() == DT_UINT16 || + att->data_type() == DT_UINT8 || att->data_type() == DT_INT32 || + att->data_type() == DT_INT16 || att->data_type() == DT_INT8) { + // Use the original attribute. + source_att = att; + } else if (att->data_type() == DT_FLOAT32) { + // Use the portable (quantized) attribute instead. + source_att = + quantized_portable_attributes_[num_processed_quantized_attributes] + .get(); + num_processed_quantized_attributes++; + } else { + // Unsupported data type. 
+ return false; + } + + if (source_att == nullptr) { + return false; + } + + // Copy source_att to the vector. + if (source_att->data_type() == DT_UINT32) { + // If the data type is the same as the one used by the point vector, we + // can directly copy individual elements. + for (PointIndex pi(0); pi < num_points; ++pi) { + const AttributeValueIndex avi = source_att->mapped_index(pi); + const uint8_t *const att_value_address = source_att->GetAddress(avi); + point_vector.CopyAttribute(source_att->num_components(), + num_processed_components, pi.value(), + att_value_address); + } + } else if (source_att->data_type() == DT_INT32 || + source_att->data_type() == DT_INT16 || + source_att->data_type() == DT_INT8) { + // Signed values need to be converted to unsigned before they are stored + // in the point vector. + std::vector signed_point(source_att->num_components()); + std::vector unsigned_point(source_att->num_components()); + for (PointIndex pi(0); pi < num_points; ++pi) { + const AttributeValueIndex avi = source_att->mapped_index(pi); + source_att->ConvertValue(avi, &signed_point[0]); + for (int c = 0; c < source_att->num_components(); ++c) { + unsigned_point[c] = + signed_point[c] - + min_signed_values_[num_processed_signed_components + c]; + } + + point_vector.CopyAttribute(source_att->num_components(), + num_processed_components, pi.value(), + &unsigned_point[0]); + } + num_processed_signed_components += source_att->num_components(); + } else { + // If the data type of the attribute is different, we have to convert the + // value before we put it to the point vector. + std::vector point(source_att->num_components()); + for (PointIndex pi(0); pi < num_points; ++pi) { + const AttributeValueIndex avi = source_att->mapped_index(pi); + source_att->ConvertValue(avi, &point[0]); + point_vector.CopyAttribute(source_att->num_components(), + num_processed_components, pi.value(), + point.data()); + } + } + num_processed_components += source_att->num_components(); + } + + // Compute the maximum bit length needed for the kd tree encoding. 
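+  // For example, if the largest component value across all points is 300
+  // (binary 100101100), MostSignificantBit() returns 8 and num_bits becomes 9.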
+ int num_bits = 0; + const uint32_t *data = point_vector[0]; + for (int i = 0; i < num_points * num_components_; ++i) { + if (data[i] > 0) { + const int msb = MostSignificantBit(data[i]) + 1; + if (msb > num_bits) { + num_bits = msb; + } + } + } + + switch (compression_level) { + case 6: { + DynamicIntegerPointsKdTreeEncoder<6> points_encoder(num_components_); + if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(), + num_bits, out_buffer)) { + return false; + } + break; + } + case 5: { + DynamicIntegerPointsKdTreeEncoder<5> points_encoder(num_components_); + if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(), + num_bits, out_buffer)) { + return false; + } + break; + } + case 4: { + DynamicIntegerPointsKdTreeEncoder<4> points_encoder(num_components_); + if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(), + num_bits, out_buffer)) { + return false; + } + break; + } + case 3: { + DynamicIntegerPointsKdTreeEncoder<3> points_encoder(num_components_); + if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(), + num_bits, out_buffer)) { + return false; + } + break; + } + case 2: { + DynamicIntegerPointsKdTreeEncoder<2> points_encoder(num_components_); + if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(), + num_bits, out_buffer)) { + return false; + } + break; + } + case 1: { + DynamicIntegerPointsKdTreeEncoder<1> points_encoder(num_components_); + if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(), + num_bits, out_buffer)) { + return false; + } + break; + } + case 0: { + DynamicIntegerPointsKdTreeEncoder<0> points_encoder(num_components_); + if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(), + num_bits, out_buffer)) { + return false; + } + break; + } + // Compression level and/or encoding speed seem wrong. + default: + return false; + } + return true; +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_encoder.h b/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_encoder.h new file mode 100644 index 000000000..80748e0bf --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_encoder.h @@ -0,0 +1,51 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_ENCODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_ENCODER_H_ + +#include "draco/attributes/attribute_quantization_transform.h" +#include "draco/compression/attributes/attributes_encoder.h" +#include "draco/compression/config/compression_shared.h" + +namespace draco { + +// Encodes all attributes of a given PointCloud using one of the available +// Kd-tree compression methods. +// See compression/point_cloud/point_cloud_kd_tree_encoder.h for more details. 
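+//
+// Rough usage sketch (in practice these calls are driven by the point cloud
+// encoder rather than issued by hand; |pos_att_id| is any valid attribute id):
+//
+//   KdTreeAttributesEncoder att_encoder(pos_att_id);
+//   att_encoder.Init(point_cloud_encoder, point_cloud);
+//   att_encoder.EncodeAttributesEncoderData(out_buffer);
+//   att_encoder.EncodeAttributes(out_buffer);  // transform + kd tree encoding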
+class KdTreeAttributesEncoder : public AttributesEncoder { + public: + KdTreeAttributesEncoder(); + explicit KdTreeAttributesEncoder(int att_id); + + uint8_t GetUniqueId() const override { return KD_TREE_ATTRIBUTE_ENCODER; } + + protected: + bool TransformAttributesToPortableFormat() override; + bool EncodePortableAttributes(EncoderBuffer *out_buffer) override; + bool EncodeDataNeededByPortableTransforms(EncoderBuffer *out_buffer) override; + + private: + std::vector + attribute_quantization_transforms_; + // Min signed values are used to transform signed integers into unsigned ones + // (by subtracting the min signed value for each component). + std::vector min_signed_values_; + std::vector> quantized_portable_attributes_; + int num_components_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_shared.h b/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_shared.h new file mode 100644 index 000000000..94841a91d --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_shared.h @@ -0,0 +1,28 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_SHARED_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_SHARED_H_ + +namespace draco { + +// Defines types of kD-tree compression +enum KdTreeAttributesEncodingMethod { + kKdTreeQuantizationEncoding = 0, + kKdTreeIntegerEncoding +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_SHARED_H_ diff --git a/contrib/draco/src/draco/compression/attributes/linear_sequencer.h b/contrib/draco/src/draco/compression/attributes/linear_sequencer.h new file mode 100644 index 000000000..7d9b52641 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/linear_sequencer.h @@ -0,0 +1,51 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_LINEAR_SEQUENCER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_LINEAR_SEQUENCER_H_ + +#include "draco/compression/attributes/points_sequencer.h" + +namespace draco { + +// A simple sequencer that generates a linear sequence [0, num_points - 1]. +// I.e., the order of the points is preserved for the input data. 
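+// For example, LinearSequencer(4) produces the point ids 0, 1, 2, 3.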
+class LinearSequencer : public PointsSequencer { + public: + explicit LinearSequencer(int32_t num_points) : num_points_(num_points) {} + + bool UpdatePointToAttributeIndexMapping(PointAttribute *attribute) override { + attribute->SetIdentityMapping(); + return true; + } + + protected: + bool GenerateSequenceInternal() override { + if (num_points_ < 0) { + return false; + } + out_point_ids()->resize(num_points_); + for (int i = 0; i < num_points_; ++i) { + out_point_ids()->at(i) = PointIndex(i); + } + return true; + } + + private: + int32_t num_points_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_LINEAR_SEQUENCER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/mesh_attribute_indices_encoding_data.h b/contrib/draco/src/draco/compression/attributes/mesh_attribute_indices_encoding_data.h new file mode 100644 index 000000000..9a358e447 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/mesh_attribute_indices_encoding_data.h @@ -0,0 +1,58 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_MESH_ATTRIBUTE_INDICES_ENCODING_DATA_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_MESH_ATTRIBUTE_INDICES_ENCODING_DATA_H_ + +#include + +#include + +#include "draco/attributes/geometry_indices.h" + +namespace draco { + +// Data used for encoding and decoding of mesh attributes. +struct MeshAttributeIndicesEncodingData { + MeshAttributeIndicesEncodingData() : num_values(0) {} + + void Init(int num_vertices) { + vertex_to_encoded_attribute_value_index_map.resize(num_vertices); + + // We expect to store one value for each vertex. + encoded_attribute_value_index_to_corner_map.reserve(num_vertices); + } + + // Array for storing the corner ids in the order their associated attribute + // entries were encoded/decoded. For every encoded attribute value entry we + // store exactly one corner. I.e., this is the mapping between an encoded + // attribute entry ids and corner ids. This map is needed for example by + // prediction schemes. Note that not all corners are included in this map, + // e.g., if multiple corners share the same attribute value, only one of these + // corners will be usually included. + std::vector encoded_attribute_value_index_to_corner_map; + + // Map for storing encoding order of attribute entries for each vertex. + // i.e. Mapping between vertices and their corresponding attribute entry ids + // that are going to be used by the decoder. + // -1 if an attribute entry hasn't been encoded/decoded yet. + std::vector vertex_to_encoded_attribute_value_index_map; + + // Total number of encoded/decoded attribute entries. 
+ int num_values; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_MESH_ATTRIBUTE_INDICES_ENCODING_DATA_H_ diff --git a/contrib/draco/src/draco/compression/attributes/normal_compression_utils.h b/contrib/draco/src/draco/compression/attributes/normal_compression_utils.h new file mode 100644 index 000000000..8a6f25b66 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/normal_compression_utils.h @@ -0,0 +1,360 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Utilities for converting unit vectors to octahedral coordinates and back. +// For more details about octahedral coordinates, see for example Cigolle +// et al.'14 “A Survey of Efficient Representations for Independent Unit +// Vectors”. +// +// In short this is motivated by an octahedron inscribed into a sphere. The +// direction of the normal vector can be defined by a point on the octahedron. +// On the right hemisphere (x > 0) this point is projected onto the x = 0 plane, +// that is, the right side of the octahedron forms a diamond like shape. The +// left side of the octahedron is also projected onto the x = 0 plane, however, +// in this case we flap the triangles of the diamond outward. Afterwards we +// shift the resulting square such that all values are positive. +// +// Important values in this file: +// * q: number of quantization bits +// * max_quantized_value: the max value representable with q bits (odd) +// * max_value: max value of the diamond = max_quantized_value - 1 (even) +// * center_value: center of the diamond after shift +// +// Note that the parameter space is somewhat periodic, e.g. (0, 0) == +// (max_value, max_value), which is also why the diamond is one smaller than the +// maximal representable value in order to have an odd range of values. + +#ifndef DRACO_COMPRESSION_ATTRIBUTES_NORMAL_COMPRESSION_UTILS_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_NORMAL_COMPRESSION_UTILS_H_ + +#include + +#include +#include + +#include "draco/core/macros.h" + +namespace draco { + +class OctahedronToolBox { + public: + OctahedronToolBox() + : quantization_bits_(-1), + max_quantized_value_(-1), + max_value_(-1), + dequantization_scale_(1.f), + center_value_(-1) {} + + bool SetQuantizationBits(int32_t q) { + if (q < 2 || q > 30) { + return false; + } + quantization_bits_ = q; + max_quantized_value_ = (1 << quantization_bits_) - 1; + max_value_ = max_quantized_value_ - 1; + dequantization_scale_ = 2.f / max_value_; + center_value_ = max_value_ / 2; + return true; + } + bool IsInitialized() const { return quantization_bits_ != -1; } + + // Convert all edge points in the top left and bottom right quadrants to + // their corresponding position in the bottom left and top right quadrants. + // Convert all corner edge points to the top right corner. 
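+  // For example, with 4 quantization bits (max_value_ = 14, center_value_ = 7)
+  // the corner point (0, 0) maps to (14, 14) and the edge point (0, 10) maps
+  // to (0, 4).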
+ inline void CanonicalizeOctahedralCoords(int32_t s, int32_t t, int32_t *out_s, + int32_t *out_t) const { + if ((s == 0 && t == 0) || (s == 0 && t == max_value_) || + (s == max_value_ && t == 0)) { + s = max_value_; + t = max_value_; + } else if (s == 0 && t > center_value_) { + t = center_value_ - (t - center_value_); + } else if (s == max_value_ && t < center_value_) { + t = center_value_ + (center_value_ - t); + } else if (t == max_value_ && s < center_value_) { + s = center_value_ + (center_value_ - s); + } else if (t == 0 && s > center_value_) { + s = center_value_ - (s - center_value_); + } + + *out_s = s; + *out_t = t; + } + + // Converts an integer vector to octahedral coordinates. + // Precondition: |int_vec| abs sum must equal center value. + inline void IntegerVectorToQuantizedOctahedralCoords(const int32_t *int_vec, + int32_t *out_s, + int32_t *out_t) const { + DRACO_DCHECK_EQ( + std::abs(int_vec[0]) + std::abs(int_vec[1]) + std::abs(int_vec[2]), + center_value_); + int32_t s, t; + if (int_vec[0] >= 0) { + // Right hemisphere. + s = (int_vec[1] + center_value_); + t = (int_vec[2] + center_value_); + } else { + // Left hemisphere. + if (int_vec[1] < 0) { + s = std::abs(int_vec[2]); + } else { + s = (max_value_ - std::abs(int_vec[2])); + } + if (int_vec[2] < 0) { + t = std::abs(int_vec[1]); + } else { + t = (max_value_ - std::abs(int_vec[1])); + } + } + CanonicalizeOctahedralCoords(s, t, out_s, out_t); + } + + template + void FloatVectorToQuantizedOctahedralCoords(const T *vector, int32_t *out_s, + int32_t *out_t) const { + const double abs_sum = std::abs(static_cast(vector[0])) + + std::abs(static_cast(vector[1])) + + std::abs(static_cast(vector[2])); + + // Adjust values such that abs sum equals 1. + double scaled_vector[3]; + if (abs_sum > 1e-6) { + // Scale needed to project the vector to the surface of an octahedron. + const double scale = 1.0 / abs_sum; + scaled_vector[0] = vector[0] * scale; + scaled_vector[1] = vector[1] * scale; + scaled_vector[2] = vector[2] * scale; + } else { + scaled_vector[0] = 1.0; + scaled_vector[1] = 0; + scaled_vector[2] = 0; + } + + // Scale vector such that the sum equals the center value. + int32_t int_vec[3]; + int_vec[0] = + static_cast(floor(scaled_vector[0] * center_value_ + 0.5)); + int_vec[1] = + static_cast(floor(scaled_vector[1] * center_value_ + 0.5)); + // Make sure the sum is exactly the center value. + int_vec[2] = center_value_ - std::abs(int_vec[0]) - std::abs(int_vec[1]); + if (int_vec[2] < 0) { + // If the sum of first two coordinates is too large, we need to decrease + // the length of one of the coordinates. + if (int_vec[1] > 0) { + int_vec[1] += int_vec[2]; + } else { + int_vec[1] -= int_vec[2]; + } + int_vec[2] = 0; + } + // Take care of the sign. 
+ if (scaled_vector[2] < 0) { + int_vec[2] *= -1; + } + + IntegerVectorToQuantizedOctahedralCoords(int_vec, out_s, out_t); + } + + // Normalize |vec| such that its abs sum is equal to the center value; + template + void CanonicalizeIntegerVector(T *vec) const { + static_assert(std::is_integral::value, "T must be an integral type."); + static_assert(std::is_signed::value, "T must be a signed type."); + const int64_t abs_sum = static_cast(std::abs(vec[0])) + + static_cast(std::abs(vec[1])) + + static_cast(std::abs(vec[2])); + + if (abs_sum == 0) { + vec[0] = center_value_; // vec[1] == v[2] == 0 + } else { + vec[0] = + (static_cast(vec[0]) * static_cast(center_value_)) / + abs_sum; + vec[1] = + (static_cast(vec[1]) * static_cast(center_value_)) / + abs_sum; + if (vec[2] >= 0) { + vec[2] = center_value_ - std::abs(vec[0]) - std::abs(vec[1]); + } else { + vec[2] = -(center_value_ - std::abs(vec[0]) - std::abs(vec[1])); + } + } + } + + inline void QuantizedOctahedralCoordsToUnitVector(int32_t in_s, int32_t in_t, + float *out_vector) const { + OctahedralCoordsToUnitVector(in_s * dequantization_scale_ - 1.f, + in_t * dequantization_scale_ - 1.f, + out_vector); + } + + // |s| and |t| are expected to be signed values. + inline bool IsInDiamond(const int32_t &s, const int32_t &t) const { + // Expect center already at origin. + DRACO_DCHECK_LE(s, center_value_); + DRACO_DCHECK_LE(t, center_value_); + DRACO_DCHECK_GE(s, -center_value_); + DRACO_DCHECK_GE(t, -center_value_); + return std::abs(s) + std::abs(t) <= center_value_; + } + + void InvertDiamond(int32_t *s, int32_t *t) const { + // Expect center already at origin. + DRACO_DCHECK_LE(*s, center_value_); + DRACO_DCHECK_LE(*t, center_value_); + DRACO_DCHECK_GE(*s, -center_value_); + DRACO_DCHECK_GE(*t, -center_value_); + int32_t sign_s = 0; + int32_t sign_t = 0; + if (*s >= 0 && *t >= 0) { + sign_s = 1; + sign_t = 1; + } else if (*s <= 0 && *t <= 0) { + sign_s = -1; + sign_t = -1; + } else { + sign_s = (*s > 0) ? 1 : -1; + sign_t = (*t > 0) ? 1 : -1; + } + + const int32_t corner_point_s = sign_s * center_value_; + const int32_t corner_point_t = sign_t * center_value_; + *s = 2 * *s - corner_point_s; + *t = 2 * *t - corner_point_t; + if (sign_s * sign_t >= 0) { + int32_t temp = *s; + *s = -*t; + *t = -temp; + } else { + std::swap(*s, *t); + } + *s = (*s + corner_point_s) / 2; + *t = (*t + corner_point_t) / 2; + } + + void InvertDirection(int32_t *s, int32_t *t) const { + // Expect center already at origin. + DRACO_DCHECK_LE(*s, center_value_); + DRACO_DCHECK_LE(*t, center_value_); + DRACO_DCHECK_GE(*s, -center_value_); + DRACO_DCHECK_GE(*t, -center_value_); + *s *= -1; + *t *= -1; + this->InvertDiamond(s, t); + } + + // For correction values. + int32_t ModMax(int32_t x) const { + if (x > this->center_value()) { + return x - this->max_quantized_value(); + } + if (x < -this->center_value()) { + return x + this->max_quantized_value(); + } + return x; + } + + // For correction values. 
+ int32_t MakePositive(int32_t x) const { + DRACO_DCHECK_LE(x, this->center_value() * 2); + if (x < 0) { + return x + this->max_quantized_value(); + } + return x; + } + + int32_t quantization_bits() const { return quantization_bits_; } + int32_t max_quantized_value() const { return max_quantized_value_; } + int32_t max_value() const { return max_value_; } + int32_t center_value() const { return center_value_; } + + private: + inline void OctahedralCoordsToUnitVector(float in_s_scaled, float in_t_scaled, + float *out_vector) const { + // Background about the encoding: + // A normal is encoded in a normalized space depicted below. The + // encoding correponds to an octahedron that is unwrapped to a 2D plane. + // During encoding, a normal is projected to the surface of the octahedron + // and the projection is then unwrapped to the 2D plane. Decoding is the + // reverse of this process. + // All points in the central diamond are located on triangles on the + // right "hemisphere" of the octahedron while all points outside of the + // diamond are on the left hemisphere (basically, they would have to be + // wrapped along the diagonal edges to form the octahedron). The central + // point corresponds to the right most vertex of the octahedron and all + // corners of the plane correspond to the left most vertex of the + // octahedron. + // + // t + // ^ *-----*-----* + // | | /|\ | + // | / | \ | + // | / | \ | + // | / | \ | + // *-----*---- * + // | \ | / | + // | \ | / | + // | \ | / | + // | \|/ | + // *-----*-----* --> s + + // Note that the input |in_s_scaled| and |in_t_scaled| are already scaled to + // <-1, 1> range. This way, the central point is at coordinate (0, 0). + float y = in_s_scaled; + float z = in_t_scaled; + + // Remaining coordinate can be computed by projecting the (y, z) values onto + // the surface of the octahedron. + const float x = 1.f - abs(y) - abs(z); + + // |x| is essentially a signed distance from the diagonal edges of the + // diamond shown on the figure above. It is positive for all points in the + // diamond (right hemisphere) and negative for all points outside the + // diamond (left hemisphere). For all points on the left hemisphere we need + // to update their (y, z) coordinates to account for the wrapping along + // the edges of the diamond. + float x_offset = -x; + x_offset = x_offset < 0 ? 0 : x_offset; + + // This will do nothing for the points on the right hemisphere but it will + // mirror the (y, z) location along the nearest diagonal edge of the + // diamond. + y += y < 0 ? x_offset : -x_offset; + z += z < 0 ? x_offset : -x_offset; + + // Normalize the computed vector. + const float norm_squared = x * x + y * y + z * z; + if (norm_squared < 1e-6) { + out_vector[0] = 0; + out_vector[1] = 0; + out_vector[2] = 0; + } else { + const float d = 1.0f / std::sqrt(norm_squared); + out_vector[0] = x * d; + out_vector[1] = y * d; + out_vector[2] = z * d; + } + } + + int32_t quantization_bits_; + int32_t max_quantized_value_; + int32_t max_value_; + float dequantization_scale_; + int32_t center_value_; +}; +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_NORMAL_COMPRESSION_UTILS_H_ diff --git a/contrib/draco/src/draco/compression/attributes/point_d_vector.h b/contrib/draco/src/draco/compression/attributes/point_d_vector.h new file mode 100644 index 000000000..3b115d500 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/point_d_vector.h @@ -0,0 +1,279 @@ +// Copyright 2018 The Draco Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef DRACO_COMPRESSION_ATTRIBUTES_POINT_D_VECTOR_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_POINT_D_VECTOR_H_ + +#include +#include +#include + +#include "draco/core/macros.h" + +namespace draco { + +// The main class of this file is PointDVector providing an interface similar to +// std::vector for arbitrary number of dimensions (without a template +// argument). PointDVectorIterator is a random access iterator, which allows for +// compatibility with existing algorithms. PseudoPointD provides for a view on +// the individual items in a contiguous block of memory, which is compatible +// with the swap function and is returned by a dereference of +// PointDVectorIterator. Swap functions provide for compatibility/specialization +// that allows these classes to work with currently utilized STL functions. + +// This class allows for swap functionality from the RandomIterator +// It seems problematic to bring this inside PointDVector due to templating. +template +class PseudoPointD { + public: + PseudoPointD(internal_t *mem, internal_t dimension) + : mem_(mem), dimension_(dimension) {} + + // Specifically copies referenced memory + void swap(PseudoPointD &other) noexcept { + for (internal_t dim = 0; dim < dimension_; dim += 1) { + std::swap(mem_[dim], other.mem_[dim]); + } + } + + PseudoPointD(const PseudoPointD &other) + : mem_(other.mem_), dimension_(other.dimension_) {} + + const internal_t &operator[](const size_t &n) const { + DRACO_DCHECK_LT(n, dimension_); + return mem_[n]; + } + internal_t &operator[](const size_t &n) { + DRACO_DCHECK_LT(n, dimension_); + return mem_[n]; + } + + bool operator==(const PseudoPointD &other) const { + for (auto dim = 0; dim < dimension_; dim += 1) { + if (mem_[dim] != other.mem_[dim]) { + return false; + } + } + return true; + } + bool operator!=(const PseudoPointD &other) const { + return !this->operator==(other); + } + + private: + internal_t *const mem_; + const internal_t dimension_; +}; + +// It seems problematic to bring this inside PointDVector due to templating. 
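// A short usage sketch (not part of the Draco sources). PseudoPointD is a
// non-owning view of one point inside a flat buffer; note that the template
// argument lists (e.g. PseudoPointD<uint32_t>) were lost in this copy of the
// header and are restored here by assumption. Swapping two views swaps the
// referenced memory, which is what std::iter_swap relies on.
#include <cstdint>

inline void PseudoPointSwapSketch() {
  uint32_t buffer[6] = {0, 1, 2, 10, 11, 12};  // Two 3-D points, back to back.
  draco::PseudoPointD<uint32_t> a(&buffer[0], 3);
  draco::PseudoPointD<uint32_t> b(&buffer[3], 3);
  a.swap(b);  // buffer is now {10, 11, 12, 0, 1, 2}.
}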
+template +void swap(draco::PseudoPointD &&a, + draco::PseudoPointD &&b) noexcept { + a.swap(b); +}; +template +void swap(draco::PseudoPointD &a, + draco::PseudoPointD &b) noexcept { + a.swap(b); +}; + +template +class PointDVector { + public: + PointDVector(const uint32_t n_items, const uint32_t dimensionality) + : n_items_(n_items), + dimensionality_(dimensionality), + item_size_bytes_(dimensionality * sizeof(internal_t)), + data_(n_items * dimensionality), + data0_(data_.data()) {} + // random access iterator + class PointDVectorIterator + : public std::iterator { + friend class PointDVector; + + public: + // std::iter_swap is called inside of std::partition and needs this + // specialized support + PseudoPointD operator*() const { + return PseudoPointD(vec_->data0_ + item_ * dimensionality_, + dimensionality_); + } + const PointDVectorIterator &operator++() { + item_ += 1; + return *this; + } + const PointDVectorIterator &operator--() { + item_ -= 1; + return *this; + } + PointDVectorIterator operator++(int32_t) { + PointDVectorIterator copy(*this); + item_ += 1; + return copy; + } + PointDVectorIterator operator--(int32_t) { + PointDVectorIterator copy(*this); + item_ -= 1; + return copy; + } + PointDVectorIterator &operator=(const PointDVectorIterator &other) { + this->item_ = other.item_; + return *this; + } + + bool operator==(const PointDVectorIterator &ref) const { + return item_ == ref.item_; + } + bool operator!=(const PointDVectorIterator &ref) const { + return item_ != ref.item_; + } + bool operator<(const PointDVectorIterator &ref) const { + return item_ < ref.item_; + } + bool operator>(const PointDVectorIterator &ref) const { + return item_ > ref.item_; + } + bool operator<=(const PointDVectorIterator &ref) const { + return item_ <= ref.item_; + } + bool operator>=(const PointDVectorIterator &ref) const { + return item_ >= ref.item_; + } + + PointDVectorIterator operator+(const int32_t &add) const { + PointDVectorIterator copy(vec_, item_ + add); + return copy; + } + PointDVectorIterator &operator+=(const int32_t &add) { + item_ += add; + return *this; + } + PointDVectorIterator operator-(const int32_t &sub) const { + PointDVectorIterator copy(vec_, item_ - sub); + return copy; + } + size_t operator-(const PointDVectorIterator &sub) const { + return (item_ - sub.item_); + } + + PointDVectorIterator &operator-=(const int32_t &sub) { + item_ -= sub; + return *this; + } + + internal_t *operator[](const size_t &n) const { + return vec_->data0_ + (item_ + n) * dimensionality_; + } + + protected: + explicit PointDVectorIterator(PointDVector *vec, size_t start_item) + : item_(start_item), vec_(vec), dimensionality_(vec->dimensionality_) {} + + private: + size_t item_; // this counts the item that should be referenced. 
+ PointDVector *const vec_; // the thing that we're iterating on + const uint32_t dimensionality_; // local copy from vec_ + }; + + PointDVectorIterator begin() { return PointDVectorIterator(this, 0); } + PointDVectorIterator end() { return PointDVectorIterator(this, n_items_); } + + // operator[] allows for unprotected user-side usage of operator[] on the + // return value AS IF it were a natively indexable type like Point3* + internal_t *operator[](const uint32_t index) { + DRACO_DCHECK_LT(index, n_items_); + return data0_ + index * dimensionality_; + } + const internal_t *operator[](const uint32_t index) const { + DRACO_DCHECK_LT(index, n_items_); + return data0_ + index * dimensionality_; + } + + uint32_t size() const { return n_items_; } + size_t GetBufferSize() const { return data_.size(); } + + // copy a single contiguous 'item' from one PointDVector into this one. + void CopyItem(const PointDVector &source, const internal_t source_index, + const internal_t destination_index) { + DRACO_DCHECK(&source != this || + (&source == this && source_index != destination_index)); + DRACO_DCHECK_LT(destination_index, n_items_); + DRACO_DCHECK_LT(source_index, source.n_items_); + + // DRACO_DCHECK_EQ(source.n_items_, n_items_); // not technically necessary + DRACO_DCHECK_EQ(source.dimensionality_, dimensionality_); + + const internal_t *ref = source[source_index]; + internal_t *const dest = this->operator[](destination_index); + std::memcpy(dest, ref, item_size_bytes_); + } + + // Copy data directly off of an attribute buffer interleaved into internal + // memory. + void CopyAttribute( + // The dimensionality of the attribute being integrated + const internal_t attribute_dimensionality, + // The offset in dimensions to insert this attribute. + const internal_t offset_dimensionality, const internal_t index, + // The direct pointer to the data + const void *const attribute_item_data) { + // chunk copy + const size_t copy_size = sizeof(internal_t) * attribute_dimensionality; + + // a multiply and add can be optimized away with an iterator + std::memcpy(data0_ + index * dimensionality_ + offset_dimensionality, + attribute_item_data, copy_size); + } + // Copy data off of a contiguous buffer interleaved into internal memory + void CopyAttribute( + // The dimensionality of the attribute being integrated + const internal_t attribute_dimensionality, + // The offset in dimensions to insert this attribute. + const internal_t offset_dimensionality, + const internal_t *const attribute_mem) { + DRACO_DCHECK_LT(offset_dimensionality, + dimensionality_ - attribute_dimensionality); + // degenerate case block copy the whole buffer. + if (dimensionality_ == attribute_dimensionality) { + DRACO_DCHECK_EQ(offset_dimensionality, 0); + const size_t copy_size = + sizeof(internal_t) * attribute_dimensionality * n_items_; + std::memcpy(data0_, attribute_mem, copy_size); + } else { // chunk copy + const size_t copy_size = sizeof(internal_t) * attribute_dimensionality; + internal_t *internal_data; + const internal_t *attribute_data; + internal_t item; + for (internal_data = data0_ + offset_dimensionality, + attribute_data = attribute_mem, item = 0; + item < n_items_; internal_data += dimensionality_, + attribute_data += attribute_dimensionality, item += 1) { + std::memcpy(internal_data, attribute_data, copy_size); + } + } + } + + private: + // internal parameters. 
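// A short usage sketch (not part of the Draco sources), again assuming the
// stripped template argument lists (PointDVector<uint32_t>,
// std::vector<uint32_t>). The iterator defined above is what lets standard
// algorithms such as std::partition operate on the container; std::iter_swap
// falls back to the PseudoPointD swap support.
#include <algorithm>
#include <cstdint>
#include <vector>

inline void PointDVectorSketch() {
  const uint32_t kPoints = 4;
  draco::PointDVector<uint32_t> points(kPoints, /*dimensionality=*/4);
  // One 3-component attribute value per point.
  const std::vector<uint32_t> attribute = {0, 0, 0, 5, 5, 5, 1, 1, 1, 7, 7, 7};
  // Interleave it starting at dimension offset 1; dimension 0 stays untouched.
  points.CopyAttribute(/*attribute_dimensionality=*/3,
                       /*offset_dimensionality=*/1, attribute.data());
  // Partition the points by their first attribute component.
  std::partition(points.begin(), points.end(),
                 [](const draco::PseudoPointD<uint32_t> &p) { return p[1] < 4; });
}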
+ const uint32_t n_items_; + const uint32_t dimensionality_; // The dimension of the points in the buffer + const uint32_t item_size_bytes_; + std::vector data_; // contiguously stored data. Never resized. + internal_t *const data0_; // raw pointer to base data. +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_POINT_D_VECTOR_H_ diff --git a/contrib/draco/src/draco/compression/attributes/point_d_vector_test.cc b/contrib/draco/src/draco/compression/attributes/point_d_vector_test.cc new file mode 100644 index 000000000..59f28f80b --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/point_d_vector_test.cc @@ -0,0 +1,360 @@ +// Copyright 2018 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/attributes/point_d_vector.h" + +#include "draco/compression/point_cloud/algorithms/point_cloud_types.h" +#include "draco/core/draco_test_base.h" + +namespace draco { + +class PointDVectorTest : public ::testing::Test { + protected: + template + void TestIntegrity() {} + template + void TestSize() { + for (uint32_t n_items = 0; n_items <= 10; ++n_items) { + for (uint32_t dimensionality = 1; dimensionality <= 10; + ++dimensionality) { + draco::PointDVector var(n_items, dimensionality); + ASSERT_EQ(n_items, var.size()); + ASSERT_EQ(n_items * dimensionality, var.GetBufferSize()); + } + } + } + template + void TestContentsContiguous() { + for (uint32_t n_items = 1; n_items <= 1000; n_items *= 10) { + for (uint32_t dimensionality = 1; dimensionality < 10; + dimensionality += 2) { + for (uint32_t att_dimensionality = 1; + att_dimensionality <= dimensionality; att_dimensionality += 2) { + for (uint32_t offset_dimensionality = 0; + offset_dimensionality < dimensionality - att_dimensionality; + ++offset_dimensionality) { + PointDVector var(n_items, dimensionality); + + std::vector att(n_items * att_dimensionality); + for (PT val = 0; val < n_items; val += 1) { + for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) { + att[val * att_dimensionality + att_dim] = val; + } + } + const PT *const attribute_data = att.data(); + + var.CopyAttribute(att_dimensionality, offset_dimensionality, + attribute_data); + + for (PT val = 0; val < n_items; val += 1) { + for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) { + ASSERT_EQ(var[val][offset_dimensionality + att_dim], val); + } + } + } + } + } + } + } + template + void TestContentsDiscrete() { + for (uint32_t n_items = 1; n_items <= 1000; n_items *= 10) { + for (uint32_t dimensionality = 1; dimensionality < 10; + dimensionality += 2) { + for (uint32_t att_dimensionality = 1; + att_dimensionality <= dimensionality; att_dimensionality += 2) { + for (uint32_t offset_dimensionality = 0; + offset_dimensionality < dimensionality - att_dimensionality; + ++offset_dimensionality) { + PointDVector var(n_items, dimensionality); + + std::vector att(n_items * att_dimensionality); + for (PT val = 0; val < n_items; val += 1) { + for (PT att_dim = 0; att_dim < 
att_dimensionality; att_dim += 1) { + att[val * att_dimensionality + att_dim] = val; + } + } + const PT *const attribute_data = att.data(); + + for (PT item = 0; item < n_items; item += 1) { + var.CopyAttribute(att_dimensionality, offset_dimensionality, item, + attribute_data + item * att_dimensionality); + } + + for (PT val = 0; val < n_items; val += 1) { + for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) { + ASSERT_EQ(var[val][offset_dimensionality + att_dim], val); + } + } + } + } + } + } + } + + template + void TestContentsCopy() { + for (uint32_t n_items = 1; n_items <= 1000; n_items *= 10) { + for (uint32_t dimensionality = 1; dimensionality < 10; + dimensionality += 2) { + for (uint32_t att_dimensionality = 1; + att_dimensionality <= dimensionality; att_dimensionality += 2) { + for (uint32_t offset_dimensionality = 0; + offset_dimensionality < dimensionality - att_dimensionality; + ++offset_dimensionality) { + PointDVector var(n_items, dimensionality); + PointDVector dest(n_items, dimensionality); + + std::vector att(n_items * att_dimensionality); + for (PT val = 0; val < n_items; val += 1) { + for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) { + att[val * att_dimensionality + att_dim] = val; + } + } + const PT *const attribute_data = att.data(); + + var.CopyAttribute(att_dimensionality, offset_dimensionality, + attribute_data); + + for (PT item = 0; item < n_items; item += 1) { + dest.CopyItem(var, item, item); + } + + for (PT val = 0; val < n_items; val += 1) { + for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) { + ASSERT_EQ(var[val][offset_dimensionality + att_dim], val); + ASSERT_EQ(dest[val][offset_dimensionality + att_dim], val); + } + } + } + } + } + } + } + template + void TestIterator() { + for (uint32_t n_items = 1; n_items <= 1000; n_items *= 10) { + for (uint32_t dimensionality = 1; dimensionality < 10; + dimensionality += 2) { + for (uint32_t att_dimensionality = 1; + att_dimensionality <= dimensionality; att_dimensionality += 2) { + for (uint32_t offset_dimensionality = 0; + offset_dimensionality < dimensionality - att_dimensionality; + ++offset_dimensionality) { + PointDVector var(n_items, dimensionality); + PointDVector dest(n_items, dimensionality); + + std::vector att(n_items * att_dimensionality); + for (PT val = 0; val < n_items; val += 1) { + for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) { + att[val * att_dimensionality + att_dim] = val; + } + } + const PT *const attribute_data = att.data(); + + var.CopyAttribute(att_dimensionality, offset_dimensionality, + attribute_data); + + for (PT item = 0; item < n_items; item += 1) { + dest.CopyItem(var, item, item); + } + + auto V0 = var.begin(); + auto VE = var.end(); + auto D0 = dest.begin(); + auto DE = dest.end(); + + while (V0 != VE && D0 != DE) { + ASSERT_EQ(*D0, *V0); // compare PseudoPointD + // verify elemental values + for (auto index = 0; index < dimensionality; index += 1) { + ASSERT_EQ((*D0)[index], (*V0)[index]); + } + ++V0; + ++D0; + } + + for (PT val = 0; val < n_items; val += 1) { + for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) { + ASSERT_EQ(var[val][offset_dimensionality + att_dim], val); + ASSERT_EQ(dest[val][offset_dimensionality + att_dim], val); + } + } + } + } + } + } + } + template + void TestPoint3Iterator() { + for (uint32_t n_items = 1; n_items <= 1000; n_items *= 10) { + const uint32_t dimensionality = 3; + // for (uint32_t dimensionality = 1; dimensionality < 10; + // dimensionality += 2) { + const uint32_t 
att_dimensionality = 3; + // for (uint32_t att_dimensionality = 1; + // att_dimensionality <= dimensionality; att_dimensionality += 2) { + for (uint32_t offset_dimensionality = 0; + offset_dimensionality < dimensionality - att_dimensionality; + ++offset_dimensionality) { + PointDVector var(n_items, dimensionality); + PointDVector dest(n_items, dimensionality); + + std::vector att(n_items * att_dimensionality); + std::vector att3(n_items); + for (PT val = 0; val < n_items; val += 1) { + att3[val][0] = val; + att3[val][1] = val; + att3[val][2] = val; + for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) { + att[val * att_dimensionality + att_dim] = val; + } + } + const PT *const attribute_data = att.data(); + + var.CopyAttribute(att_dimensionality, offset_dimensionality, + attribute_data); + + for (PT item = 0; item < n_items; item += 1) { + dest.CopyItem(var, item, item); + } + + auto aV0 = att3.begin(); + auto aVE = att3.end(); + auto V0 = var.begin(); + auto VE = var.end(); + auto D0 = dest.begin(); + auto DE = dest.end(); + + while (aV0 != aVE && V0 != VE && D0 != DE) { + ASSERT_EQ(*D0, *V0); // compare PseudoPointD + // verify elemental values + for (auto index = 0; index < dimensionality; index += 1) { + ASSERT_EQ((*D0)[index], (*V0)[index]); + ASSERT_EQ((*D0)[index], (*aV0)[index]); + ASSERT_EQ((*aV0)[index], (*V0)[index]); + } + ++aV0; + ++V0; + ++D0; + } + + for (PT val = 0; val < n_items; val += 1) { + for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) { + ASSERT_EQ(var[val][offset_dimensionality + att_dim], val); + ASSERT_EQ(dest[val][offset_dimensionality + att_dim], val); + } + } + } + } + } + + void TestPseudoPointDSwap() { + draco::Point3ui val = {0, 1, 2}; + draco::Point3ui dest = {10, 11, 12}; + draco::PseudoPointD val_src1(&val[0], 3); + draco::PseudoPointD dest_src1(&dest[0], 3); + + ASSERT_EQ(val_src1[0], 0); + ASSERT_EQ(val_src1[1], 1); + ASSERT_EQ(val_src1[2], 2); + ASSERT_EQ(dest_src1[0], 10); + ASSERT_EQ(dest_src1[1], 11); + ASSERT_EQ(dest_src1[2], 12); + + ASSERT_NE(val_src1, dest_src1); + + swap(val_src1, dest_src1); + + ASSERT_EQ(dest_src1[0], 0); + ASSERT_EQ(dest_src1[1], 1); + ASSERT_EQ(dest_src1[2], 2); + ASSERT_EQ(val_src1[0], 10); + ASSERT_EQ(val_src1[1], 11); + ASSERT_EQ(val_src1[2], 12); + + ASSERT_NE(val_src1, dest_src1); + } + void TestPseudoPointDEquality() { + draco::Point3ui val = {0, 1, 2}; + draco::Point3ui dest = {0, 1, 2}; + draco::PseudoPointD val_src1(&val[0], 3); + draco::PseudoPointD val_src2(&val[0], 3); + draco::PseudoPointD dest_src1(&dest[0], 3); + draco::PseudoPointD dest_src2(&dest[0], 3); + + ASSERT_EQ(val_src1, val_src1); + ASSERT_EQ(val_src1, val_src2); + ASSERT_EQ(dest_src1, val_src1); + ASSERT_EQ(dest_src1, val_src2); + ASSERT_EQ(val_src2, val_src1); + ASSERT_EQ(val_src2, val_src2); + ASSERT_EQ(dest_src2, val_src1); + ASSERT_EQ(dest_src2, val_src2); + + for (auto i = 0; i < 3; i++) { + ASSERT_EQ(val_src1[i], val_src1[i]); + ASSERT_EQ(val_src1[i], val_src2[i]); + ASSERT_EQ(dest_src1[i], val_src1[i]); + ASSERT_EQ(dest_src1[i], val_src2[i]); + ASSERT_EQ(val_src2[i], val_src1[i]); + ASSERT_EQ(val_src2[i], val_src2[i]); + ASSERT_EQ(dest_src2[i], val_src1[i]); + ASSERT_EQ(dest_src2[i], val_src2[i]); + } + } + void TestPseudoPointDInequality() { + draco::Point3ui val = {0, 1, 2}; + draco::Point3ui dest = {1, 2, 3}; + draco::PseudoPointD val_src1(&val[0], 3); + draco::PseudoPointD val_src2(&val[0], 3); + draco::PseudoPointD dest_src1(&dest[0], 3); + draco::PseudoPointD dest_src2(&dest[0], 3); + + ASSERT_EQ(val_src1, 
val_src1); + ASSERT_EQ(val_src1, val_src2); + ASSERT_NE(dest_src1, val_src1); + ASSERT_NE(dest_src1, val_src2); + ASSERT_EQ(val_src2, val_src1); + ASSERT_EQ(val_src2, val_src2); + ASSERT_NE(dest_src2, val_src1); + ASSERT_NE(dest_src2, val_src2); + + for (auto i = 0; i < 3; i++) { + ASSERT_EQ(val_src1[i], val_src1[i]); + ASSERT_EQ(val_src1[i], val_src2[i]); + ASSERT_NE(dest_src1[i], val_src1[i]); + ASSERT_NE(dest_src1[i], val_src2[i]); + ASSERT_EQ(val_src2[i], val_src1[i]); + ASSERT_EQ(val_src2[i], val_src2[i]); + ASSERT_NE(dest_src2[i], val_src1[i]); + ASSERT_NE(dest_src2[i], val_src2[i]); + } + } +}; + +TEST_F(PointDVectorTest, VectorTest) { + TestSize(); + TestContentsDiscrete(); + TestContentsContiguous(); + TestContentsCopy(); + TestIterator(); + TestPoint3Iterator(); +} +TEST_F(PointDVectorTest, PseudoPointDTest) { + TestPseudoPointDSwap(); + TestPseudoPointDEquality(); + TestPseudoPointDInequality(); +} +} // namespace draco diff --git a/contrib/draco/src/draco/compression/attributes/points_sequencer.h b/contrib/draco/src/draco/compression/attributes/points_sequencer.h new file mode 100644 index 000000000..2f4f7e16d --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/points_sequencer.h @@ -0,0 +1,63 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_POINTS_SEQUENCER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_POINTS_SEQUENCER_H_ + +#include + +#include "draco/attributes/point_attribute.h" + +namespace draco { + +// Class for generating a sequence of point ids that can be used to encode +// or decode attribute values in a specific order. +// See sequential_attribute_encoders/decoders_controller.h for more details. +class PointsSequencer { + public: + PointsSequencer() : out_point_ids_(nullptr) {} + virtual ~PointsSequencer() = default; + + // Fills the |out_point_ids| with the generated sequence of point ids. + bool GenerateSequence(std::vector *out_point_ids) { + out_point_ids_ = out_point_ids; + return GenerateSequenceInternal(); + } + + // Appends a point to the sequence. + void AddPointId(PointIndex point_id) { out_point_ids_->push_back(point_id); } + + // Sets the correct mapping between point ids and value ids. I.e., the inverse + // of the |out_point_ids|. In general, |out_point_ids_| does not contain + // sufficient information to compute the inverse map, because not all point + // ids are necessarily contained within the map. + // Must be implemented for sequencers that are used by attribute decoders. + virtual bool UpdatePointToAttributeIndexMapping(PointAttribute * /* attr */) { + return false; + } + + protected: + // Method that needs to be implemented by the derived classes. The + // implementation is responsible for filling |out_point_ids_| with the valid + // sequence of point ids. 
+  virtual bool GenerateSequenceInternal() = 0;
+  std::vector<PointIndex> *out_point_ids() const { return out_point_ids_; }
+
+ private:
+  std::vector<PointIndex> *out_point_ids_;
+};
+
+}  // namespace draco
+
+#endif  // DRACO_COMPRESSION_ATTRIBUTES_POINTS_SEQUENCER_H_
diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_decoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_decoder.h
new file mode 100644
index 000000000..36c124baa
--- /dev/null
+++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_decoder.h
@@ -0,0 +1,231 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_DECODER_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_DECODER_H_
+
+#include
+#include
+
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_shared.h"
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h"
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h"
+#include "draco/compression/bit_coders/rans_bit_decoder.h"
+#include "draco/core/varint_decoding.h"
+#include "draco/draco_features.h"
+
+namespace draco {
+
+// Decoder for predictions encoded with the constrained multi-parallelogram
+// encoder. See the corresponding encoder for more details about the prediction
+// method.
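// A minimal sketch (not part of the Draco sources) of how the PointsSequencer
// interface above is meant to be derived from. The class name is hypothetical;
// it simply emits the identity sequence 0..n-1.
#include <cstdint>

class IdentitySequencerSketch : public draco::PointsSequencer {
 public:
  explicit IdentitySequencerSketch(uint32_t num_points)
      : num_points_(num_points) {}

 protected:
  bool GenerateSequenceInternal() override {
    for (uint32_t i = 0; i < num_points_; ++i) {
      AddPointId(draco::PointIndex(i));
    }
    return true;
  }

 private:
  uint32_t num_points_;
};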
+template +class MeshPredictionSchemeConstrainedMultiParallelogramDecoder + : public MeshPredictionSchemeDecoder { + public: + using CorrType = + typename PredictionSchemeDecoder::CorrType; + using CornerTable = typename MeshDataT::CornerTable; + + explicit MeshPredictionSchemeConstrainedMultiParallelogramDecoder( + const PointAttribute *attribute) + : MeshPredictionSchemeDecoder( + attribute), + selected_mode_(Mode::OPTIMAL_MULTI_PARALLELOGRAM) {} + MeshPredictionSchemeConstrainedMultiParallelogramDecoder( + const PointAttribute *attribute, const TransformT &transform, + const MeshDataT &mesh_data) + : MeshPredictionSchemeDecoder( + attribute, transform, mesh_data), + selected_mode_(Mode::OPTIMAL_MULTI_PARALLELOGRAM) {} + + bool ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data, + int size, int num_components, + const PointIndex *entry_to_point_id_map) override; + + bool DecodePredictionData(DecoderBuffer *buffer) override; + + PredictionSchemeMethod GetPredictionMethod() const override { + return MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM; + } + + bool IsInitialized() const override { + return this->mesh_data().IsInitialized(); + } + + private: + typedef constrained_multi_parallelogram::Mode Mode; + static constexpr int kMaxNumParallelograms = + constrained_multi_parallelogram::kMaxNumParallelograms; + // Crease edges are used to store whether any given edge should be used for + // parallelogram prediction or not. New values are added in the order in which + // the edges are processed. For better compression, the flags are stored in + // in separate contexts based on the number of available parallelograms at a + // given vertex. + std::vector is_crease_edge_[kMaxNumParallelograms]; + Mode selected_mode_; +}; + +template +bool MeshPredictionSchemeConstrainedMultiParallelogramDecoder< + DataTypeT, TransformT, MeshDataT>:: + ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data, + int /* size */, int num_components, + const PointIndex * /* entry_to_point_id_map */) { + this->transform().Init(num_components); + + // Predicted values for all simple parallelograms encountered at any given + // vertex. + std::vector pred_vals[kMaxNumParallelograms]; + for (int i = 0; i < kMaxNumParallelograms; ++i) { + pred_vals[i].resize(num_components, 0); + } + this->transform().ComputeOriginalValue(pred_vals[0].data(), in_corr, + out_data); + + const CornerTable *const table = this->mesh_data().corner_table(); + const std::vector *const vertex_to_data_map = + this->mesh_data().vertex_to_data_map(); + + // Current position in the |is_crease_edge_| array for each context. + std::vector is_crease_edge_pos(kMaxNumParallelograms, 0); + + // Used to store predicted value for multi-parallelogram prediction. + std::vector multi_pred_vals(num_components); + + const int corner_map_size = + static_cast(this->mesh_data().data_to_corner_map()->size()); + for (int p = 1; p < corner_map_size; ++p) { + const CornerIndex start_corner_id = + this->mesh_data().data_to_corner_map()->at(p); + + CornerIndex corner_id(start_corner_id); + int num_parallelograms = 0; + bool first_pass = true; + while (corner_id != kInvalidCornerIndex) { + if (ComputeParallelogramPrediction( + p, corner_id, table, *vertex_to_data_map, out_data, + num_components, &(pred_vals[num_parallelograms][0]))) { + // Parallelogram prediction applied and stored in + // |pred_vals[num_parallelograms]| + ++num_parallelograms; + // Stop processing when we reach the maximum number of allowed + // parallelograms. 
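// A conceptual sketch (not part of the Draco sources) of the parallelogram
// rule that ComputeParallelogramPrediction (declared in
// mesh_prediction_scheme_parallelogram_shared.h) applies for each opposite
// face: with the predicted vertex v sharing the edge (a, b) with the triangle
// (a, b, c), every component of v is predicted as a + b - c. The exact corner
// bookkeeping lives in the shared header; this shows only the underlying
// formula.
#include <cstdint>

inline void ParallelogramRuleSketch(const int32_t *a, const int32_t *b,
                                    const int32_t *c, int num_components,
                                    int32_t *out_prediction) {
  for (int i = 0; i < num_components; ++i) {
    out_prediction[i] = a[i] + b[i] - c[i];
  }
}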
+ if (num_parallelograms == kMaxNumParallelograms) { + break; + } + } + + // Proceed to the next corner attached to the vertex. First swing left + // and if we reach a boundary, swing right from the start corner. + if (first_pass) { + corner_id = table->SwingLeft(corner_id); + } else { + corner_id = table->SwingRight(corner_id); + } + if (corner_id == start_corner_id) { + break; + } + if (corner_id == kInvalidCornerIndex && first_pass) { + first_pass = false; + corner_id = table->SwingRight(start_corner_id); + } + } + + // Check which of the available parallelograms are actually used and compute + // the final predicted value. + int num_used_parallelograms = 0; + if (num_parallelograms > 0) { + for (int i = 0; i < num_components; ++i) { + multi_pred_vals[i] = 0; + } + // Check which parallelograms are actually used. + for (int i = 0; i < num_parallelograms; ++i) { + const int context = num_parallelograms - 1; + const int pos = is_crease_edge_pos[context]++; + if (is_crease_edge_[context].size() <= pos) { + return false; + } + const bool is_crease = is_crease_edge_[context][pos]; + if (!is_crease) { + ++num_used_parallelograms; + for (int j = 0; j < num_components; ++j) { + multi_pred_vals[j] += pred_vals[i][j]; + } + } + } + } + const int dst_offset = p * num_components; + if (num_used_parallelograms == 0) { + // No parallelogram was valid. + // We use the last decoded point as a reference. + const int src_offset = (p - 1) * num_components; + this->transform().ComputeOriginalValue( + out_data + src_offset, in_corr + dst_offset, out_data + dst_offset); + } else { + // Compute the correction from the predicted value. + for (int c = 0; c < num_components; ++c) { + multi_pred_vals[c] /= num_used_parallelograms; + } + this->transform().ComputeOriginalValue( + multi_pred_vals.data(), in_corr + dst_offset, out_data + dst_offset); + } + } + return true; +} + +template +bool MeshPredictionSchemeConstrainedMultiParallelogramDecoder< + DataTypeT, TransformT, MeshDataT>::DecodePredictionData(DecoderBuffer + *buffer) { +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED + if (buffer->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 2)) { + // Decode prediction mode. + uint8_t mode; + if (!buffer->Decode(&mode)) { + return false; + } + + if (mode != Mode::OPTIMAL_MULTI_PARALLELOGRAM) { + // Unsupported mode. + return false; + } + } +#endif + + // Encode selected edges using separate rans bit coder for each context. 
+ for (int i = 0; i < kMaxNumParallelograms; ++i) { + uint32_t num_flags; + if (!DecodeVarint(&num_flags, buffer)) { + return false; + } + if (num_flags > 0) { + is_crease_edge_[i].resize(num_flags); + RAnsBitDecoder decoder; + if (!decoder.StartDecoding(buffer)) { + return false; + } + for (uint32_t j = 0; j < num_flags; ++j) { + is_crease_edge_[i][j] = decoder.DecodeNextBit(); + } + decoder.EndDecoding(); + } + } + return MeshPredictionSchemeDecoder::DecodePredictionData(buffer); +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_encoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_encoder.h new file mode 100644 index 000000000..77df8ee24 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_encoder.h @@ -0,0 +1,414 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_ENCODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_ENCODER_H_ + +#include +#include + +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_shared.h" +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h" +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h" +#include "draco/compression/bit_coders/rans_bit_encoder.h" +#include "draco/compression/entropy/shannon_entropy.h" +#include "draco/core/varint_encoding.h" + +namespace draco { + +// Compared to standard multi-parallelogram, constrained multi-parallelogram can +// explicitly select which of the available parallelograms are going to be used +// for the prediction by marking crease edges between two triangles. This +// requires storing extra data, but it allows the predictor to avoid using +// parallelograms that would lead to poor predictions. For improved efficiency, +// our current implementation limits the maximum number of used parallelograms +// to four, which covers >95% of the cases (on average, there are only two +// parallelograms available for any given vertex). +// All bits of the explicitly chosen configuration are stored together in a +// single context chosen by the total number of parallelograms available to +// choose from. 
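// A worked sketch (not part of the Draco sources) of how a configuration
// bitfield selects and averages the available parallelograms. With three
// parallelograms available, configuration 0b101 uses predictions 0 and 2,
// their component-wise average becomes the final prediction, and the
// corresponding "used / not used" flags land in the context for three
// available parallelograms (index 2).
#include <cstddef>
#include <cstdint>
#include <vector>

inline std::vector<int64_t> AveragePredictionsSketch(
    const std::vector<std::vector<int64_t>> &pred_vals,
    uint8_t configuration, int num_components) {
  std::vector<int64_t> averaged(num_components, 0);
  int num_used = 0;
  for (size_t i = 0; i < pred_vals.size(); ++i) {
    if (configuration & (1u << i)) {
      ++num_used;
      for (int c = 0; c < num_components; ++c) {
        averaged[c] += pred_vals[i][c];
      }
    }
  }
  if (num_used > 0) {
    for (int c = 0; c < num_components; ++c) {
      averaged[c] /= num_used;
    }
  }
  return averaged;
}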
+template +class MeshPredictionSchemeConstrainedMultiParallelogramEncoder + : public MeshPredictionSchemeEncoder { + public: + using CorrType = + typename PredictionSchemeEncoder::CorrType; + using CornerTable = typename MeshDataT::CornerTable; + + explicit MeshPredictionSchemeConstrainedMultiParallelogramEncoder( + const PointAttribute *attribute) + : MeshPredictionSchemeEncoder( + attribute), + selected_mode_(Mode::OPTIMAL_MULTI_PARALLELOGRAM) {} + MeshPredictionSchemeConstrainedMultiParallelogramEncoder( + const PointAttribute *attribute, const TransformT &transform, + const MeshDataT &mesh_data) + : MeshPredictionSchemeEncoder( + attribute, transform, mesh_data), + selected_mode_(Mode::OPTIMAL_MULTI_PARALLELOGRAM) {} + + bool ComputeCorrectionValues( + const DataTypeT *in_data, CorrType *out_corr, int size, + int num_components, const PointIndex *entry_to_point_id_map) override; + + bool EncodePredictionData(EncoderBuffer *buffer) override; + + PredictionSchemeMethod GetPredictionMethod() const override { + return MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM; + } + + bool IsInitialized() const override { + return this->mesh_data().IsInitialized(); + } + + private: + // Function used to compute number of bits needed to store overhead of the + // predictor. In this case, we consider overhead to be all bits that mark + // whether a parallelogram should be used for prediction or not. The input + // to this method is the total number of parallelograms that were evaluated so + // far(total_parallelogram), and the number of parallelograms we decided to + // use for prediction (total_used_parallelograms). + // Returns number of bits required to store the overhead. + int64_t ComputeOverheadBits(int64_t total_used_parallelograms, + int64_t total_parallelogram) const { + // For now we assume RAns coding for the bits where the total required size + // is directly correlated to the binary entropy of the input stream. + // TODO(ostava): This should be generalized in case we use other binary + // coding scheme. + const double entropy = ComputeBinaryShannonEntropy( + static_cast(total_parallelogram), + static_cast(total_used_parallelograms)); + + // Round up to the nearest full bit. + return static_cast( + ceil(static_cast(total_parallelogram) * entropy)); + } + + // Struct that contains data used for measuring the error of each available + // parallelogram configuration. + struct Error { + Error() : num_bits(0), residual_error(0) {} + + // Primary metric: number of bits required to store the data as a result of + // the selected prediction configuration. + int num_bits; + // Secondary metric: absolute difference of residuals for the given + // configuration. + int residual_error; + + bool operator<(const Error &e) const { + if (num_bits < e.num_bits) { + return true; + } + if (num_bits > e.num_bits) { + return false; + } + return residual_error < e.residual_error; + } + }; + + // Computes error for predicting |predicted_val| instead of |actual_val|. + // Error is computed as the number of bits needed to encode the difference + // between the values. + Error ComputeError(const DataTypeT *predicted_val, + const DataTypeT *actual_val, int *out_residuals, + int num_components) { + Error error; + + for (int i = 0; i < num_components; ++i) { + const int dif = (predicted_val[i] - actual_val[i]); + error.residual_error += std::abs(dif); + out_residuals[i] = dif; + // Entropy needs unsigned symbols, so convert the signed difference to an + // unsigned symbol. 
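// A sketch (not part of the Draco sources) of the kind of signed-to-unsigned
// folding that ConvertSignedIntToSymbol performs below; the exact Draco
// implementation lives elsewhere in the library. The usual zig-zag mapping
// sends 0, -1, 1, -2, 2, ... to 0, 1, 2, 3, 4, ... so that small residuals of
// either sign stay small symbols.
#include <cstdint>

inline uint32_t ZigZagEncodeSketch(int32_t value) {
  // Arithmetic shift of the sign bit; well defined on the compilers Draco
  // targets, though strictly implementation-defined for negative values.
  return (static_cast<uint32_t>(value) << 1) ^
         static_cast<uint32_t>(value >> 31);
}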
+ entropy_symbols_[i] = ConvertSignedIntToSymbol(dif); + } + + // Generate entropy data for case that this configuration was used. + // Note that the entropy stream is NOT updated in this case. + const auto entropy_data = + entropy_tracker_.Peek(entropy_symbols_.data(), num_components); + + error.num_bits = entropy_tracker_.GetNumberOfDataBits(entropy_data) + + entropy_tracker_.GetNumberOfRAnsTableBits(entropy_data); + return error; + } + + typedef constrained_multi_parallelogram::Mode Mode; + static constexpr int kMaxNumParallelograms = + constrained_multi_parallelogram::kMaxNumParallelograms; + // Crease edges are used to store whether any given edge should be used for + // parallelogram prediction or not. New values are added in the order in which + // the edges are processed. For better compression, the flags are stored in + // in separate contexts based on the number of available parallelograms at a + // given vertex. + // TODO(draco-eng) reconsider std::vector (performance/space). + std::vector is_crease_edge_[kMaxNumParallelograms]; + Mode selected_mode_; + + ShannonEntropyTracker entropy_tracker_; + + // Temporary storage for symbols that are fed into the |entropy_stream|. + // Always contains only |num_components| entries. + std::vector entropy_symbols_; +}; + +template +bool MeshPredictionSchemeConstrainedMultiParallelogramEncoder< + DataTypeT, TransformT, MeshDataT>:: + ComputeCorrectionValues(const DataTypeT *in_data, CorrType *out_corr, + int size, int num_components, + const PointIndex * /* entry_to_point_id_map */) { + this->transform().Init(in_data, size, num_components); + const CornerTable *const table = this->mesh_data().corner_table(); + const std::vector *const vertex_to_data_map = + this->mesh_data().vertex_to_data_map(); + + // Predicted values for all simple parallelograms encountered at any given + // vertex. + std::vector pred_vals[kMaxNumParallelograms]; + for (int i = 0; i < kMaxNumParallelograms; ++i) { + pred_vals[i].resize(num_components); + } + // Used to store predicted value for various multi-parallelogram predictions + // (combinations of simple parallelogram predictions). + std::vector multi_pred_vals(num_components); + entropy_symbols_.resize(num_components); + + // Struct for holding data about prediction configuration for different sets + // of used parallelograms. + struct PredictionConfiguration { + PredictionConfiguration() + : error(), configuration(0), num_used_parallelograms(0) {} + Error error; + uint8_t configuration; // Bitfield, 1 use parallelogram, 0 don't use it. + int num_used_parallelograms; + std::vector predicted_value; + std::vector residuals; + }; + + // Bit-field used for computing permutations of excluded edges + // (parallelograms). + bool exluded_parallelograms[kMaxNumParallelograms]; + + // Data about the number of used parallelogram and total number of available + // parallelogram for each context. Used to compute overhead needed for storing + // the parallelogram choices made by the encoder. + int64_t total_used_parallelograms[kMaxNumParallelograms] = {0}; + int64_t total_parallelograms[kMaxNumParallelograms] = {0}; + + std::vector current_residuals(num_components); + + // We start processing the vertices from the end because this prediction uses + // data from previous entries that could be overwritten when an entry is + // processed. 
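// A standalone sketch (not part of the Draco sources) of the overhead estimate
// made by ComputeOverheadBits above: the crease-edge flags are RAns coded, so
// their cost is approximated by the binary Shannon entropy of the flag stream,
// rounded up to whole bits.
#include <cmath>
#include <cstdint>

inline int64_t OverheadBitsSketch(int64_t used, int64_t total) {
  if (total == 0 || used == 0 || used == total) {
    return 0;  // A constant flag stream is (nearly) free to encode.
  }
  const double p = static_cast<double>(used) / static_cast<double>(total);
  const double entropy = -p * std::log2(p) - (1.0 - p) * std::log2(1.0 - p);
  return static_cast<int64_t>(std::ceil(static_cast<double>(total) * entropy));
}

// Example: 25 used out of 100 available parallelograms gives an entropy of
// about 0.81 bits per flag, i.e. an overhead of 82 bits.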
+ for (int p = + static_cast(this->mesh_data().data_to_corner_map()->size()) - 1; + p > 0; --p) { + const CornerIndex start_corner_id = + this->mesh_data().data_to_corner_map()->at(p); + + // Go over all corners attached to the vertex and compute the predicted + // value from the parallelograms defined by their opposite faces. + CornerIndex corner_id(start_corner_id); + int num_parallelograms = 0; + bool first_pass = true; + while (corner_id != kInvalidCornerIndex) { + if (ComputeParallelogramPrediction( + p, corner_id, table, *vertex_to_data_map, in_data, num_components, + &(pred_vals[num_parallelograms][0]))) { + // Parallelogram prediction applied and stored in + // |pred_vals[num_parallelograms]| + ++num_parallelograms; + // Stop processing when we reach the maximum number of allowed + // parallelograms. + if (num_parallelograms == kMaxNumParallelograms) { + break; + } + } + + // Proceed to the next corner attached to the vertex. First swing left + // and if we reach a boundary, swing right from the start corner. + if (first_pass) { + corner_id = table->SwingLeft(corner_id); + } else { + corner_id = table->SwingRight(corner_id); + } + if (corner_id == start_corner_id) { + break; + } + if (corner_id == kInvalidCornerIndex && first_pass) { + first_pass = false; + corner_id = table->SwingRight(start_corner_id); + } + } + + // Offset to the target (destination) vertex. + const int dst_offset = p * num_components; + Error error; + + // Compute all prediction errors for all possible configurations of + // available parallelograms. + + // Variable for holding the best configuration that has been found so far. + PredictionConfiguration best_prediction; + + // Compute delta coding error (configuration when no parallelogram is + // selected). + const int src_offset = (p - 1) * num_components; + error = ComputeError(in_data + src_offset, in_data + dst_offset, + ¤t_residuals[0], num_components); + + if (num_parallelograms > 0) { + total_parallelograms[num_parallelograms - 1] += num_parallelograms; + const int64_t new_overhead_bits = + ComputeOverheadBits(total_used_parallelograms[num_parallelograms - 1], + total_parallelograms[num_parallelograms - 1]); + error.num_bits += new_overhead_bits; + } + + best_prediction.error = error; + best_prediction.configuration = 0; + best_prediction.num_used_parallelograms = 0; + best_prediction.predicted_value.assign( + in_data + src_offset, in_data + src_offset + num_components); + best_prediction.residuals.assign(current_residuals.begin(), + current_residuals.end()); + + // Compute prediction error for different cases of used parallelograms. + for (int num_used_parallelograms = 1; + num_used_parallelograms <= num_parallelograms; + ++num_used_parallelograms) { + // Mark all parallelograms as excluded. + std::fill(exluded_parallelograms, + exluded_parallelograms + num_parallelograms, true); + // TODO(draco-eng) maybe this should be another std::fill. + // Mark the first |num_used_parallelograms| as not excluded. + for (int j = 0; j < num_used_parallelograms; ++j) { + exluded_parallelograms[j] = false; + } + // Permute over the excluded edges and compute error for each + // configuration (permutation of excluded parallelograms). + do { + // Reset the multi-parallelogram predicted values. 
+ for (int j = 0; j < num_components; ++j) { + multi_pred_vals[j] = 0; + } + uint8_t configuration = 0; + for (int j = 0; j < num_parallelograms; ++j) { + if (exluded_parallelograms[j]) { + continue; + } + for (int c = 0; c < num_components; ++c) { + multi_pred_vals[c] += pred_vals[j][c]; + } + // Set jth bit of the configuration. + configuration |= (1 << j); + } + + for (int j = 0; j < num_components; ++j) { + multi_pred_vals[j] /= num_used_parallelograms; + } + error = ComputeError(multi_pred_vals.data(), in_data + dst_offset, + ¤t_residuals[0], num_components); + if (num_parallelograms > 0) { + const int64_t new_overhead_bits = ComputeOverheadBits( + total_used_parallelograms[num_parallelograms - 1] + + num_used_parallelograms, + total_parallelograms[num_parallelograms - 1]); + + // Add overhead bits to the total error. + error.num_bits += new_overhead_bits; + } + if (error < best_prediction.error) { + best_prediction.error = error; + best_prediction.configuration = configuration; + best_prediction.num_used_parallelograms = num_used_parallelograms; + best_prediction.predicted_value.assign(multi_pred_vals.begin(), + multi_pred_vals.end()); + best_prediction.residuals.assign(current_residuals.begin(), + current_residuals.end()); + } + } while (std::next_permutation( + exluded_parallelograms, exluded_parallelograms + num_parallelograms)); + } + if (num_parallelograms > 0) { + total_used_parallelograms[num_parallelograms - 1] += + best_prediction.num_used_parallelograms; + } + + // Update the entropy stream by adding selected residuals as symbols to the + // stream. + for (int i = 0; i < num_components; ++i) { + entropy_symbols_[i] = + ConvertSignedIntToSymbol(best_prediction.residuals[i]); + } + entropy_tracker_.Push(entropy_symbols_.data(), num_components); + + for (int i = 0; i < num_parallelograms; ++i) { + if ((best_prediction.configuration & (1 << i)) == 0) { + // Parallelogram not used, mark the edge as crease. + is_crease_edge_[num_parallelograms - 1].push_back(true); + } else { + // Parallelogram used. Add it to the predicted value and mark the + // edge as not a crease. + is_crease_edge_[num_parallelograms - 1].push_back(false); + } + } + this->transform().ComputeCorrection(in_data + dst_offset, + best_prediction.predicted_value.data(), + out_corr + dst_offset); + } + // First element is always fixed because it cannot be predicted. + for (int i = 0; i < num_components; ++i) { + pred_vals[0][i] = static_cast(0); + } + this->transform().ComputeCorrection(in_data, pred_vals[0].data(), out_corr); + return true; +} + +template +bool MeshPredictionSchemeConstrainedMultiParallelogramEncoder< + DataTypeT, TransformT, MeshDataT>::EncodePredictionData(EncoderBuffer + *buffer) { + // Encode selected edges using separate rans bit coder for each context. + for (int i = 0; i < kMaxNumParallelograms; ++i) { + // |i| is the context based on the number of available parallelograms, which + // is always equal to |i + 1|. + const int num_used_parallelograms = i + 1; + EncodeVarint(is_crease_edge_[i].size(), buffer); + if (is_crease_edge_[i].size()) { + RAnsBitEncoder encoder; + encoder.StartEncoding(); + // Encode the crease edge flags in the reverse vertex order that is needed + // be the decoder. Note that for the currently supported mode, each vertex + // has exactly |num_used_parallelograms| edges that need to be encoded. + for (int j = static_cast(is_crease_edge_[i].size()) - + num_used_parallelograms; + j >= 0; j -= num_used_parallelograms) { + // Go over all edges of the current vertex. 
+ for (int k = 0; k < num_used_parallelograms; ++k) { + encoder.EncodeBit(is_crease_edge_[i][j + k]); + } + } + encoder.EndEncoding(buffer); + } + } + return MeshPredictionSchemeEncoder::EncodePredictionData(buffer); +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_shared.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_shared.h new file mode 100644 index 000000000..c7a4e351a --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_shared.h @@ -0,0 +1,34 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_SHARED_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_SHARED_H_ + +namespace draco { + +// Data shared between constrained multi-parallelogram encoder and decoder. +namespace constrained_multi_parallelogram { + +enum Mode { + // Selects the optimal multi-parallelogram from up to 4 available + // parallelograms. + OPTIMAL_MULTI_PARALLELOGRAM = 0, +}; + +static constexpr int kMaxNumParallelograms = 4; + +} // namespace constrained_multi_parallelogram +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_SHARED_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_data.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_data.h new file mode 100644 index 000000000..2960a5e71 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_data.h @@ -0,0 +1,72 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_DATA_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_DATA_H_ + +#include "draco/mesh/corner_table.h" +#include "draco/mesh/mesh.h" + +namespace draco { + +// Class stores data about the connectivity data of the mesh and information +// about how the connectivity was encoded/decoded. +template +class MeshPredictionSchemeData { + public: + typedef CornerTableT CornerTable; + MeshPredictionSchemeData() + : mesh_(nullptr), + corner_table_(nullptr), + vertex_to_data_map_(nullptr), + data_to_corner_map_(nullptr) {} + + void Set(const Mesh *mesh, const CornerTable *table, + const std::vector *data_to_corner_map, + const std::vector *vertex_to_data_map) { + mesh_ = mesh; + corner_table_ = table; + data_to_corner_map_ = data_to_corner_map; + vertex_to_data_map_ = vertex_to_data_map; + } + + const Mesh *mesh() const { return mesh_; } + const CornerTable *corner_table() const { return corner_table_; } + const std::vector *vertex_to_data_map() const { + return vertex_to_data_map_; + } + const std::vector *data_to_corner_map() const { + return data_to_corner_map_; + } + bool IsInitialized() const { + return mesh_ != nullptr && corner_table_ != nullptr && + vertex_to_data_map_ != nullptr && data_to_corner_map_ != nullptr; + } + + private: + const Mesh *mesh_; + const CornerTable *corner_table_; + + // Mapping between vertices and their encoding order. I.e. when an attribute + // entry on a given vertex was encoded. + const std::vector *vertex_to_data_map_; + + // Array that stores which corner was processed when a given attribute entry + // was encoded or decoded. + const std::vector *data_to_corner_map_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_DATA_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h new file mode 100644 index 000000000..6694a981c --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h @@ -0,0 +1,46 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_DECODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_DECODER_H_ + +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_data.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_decoder.h" + +namespace draco { + +// Base class for all mesh prediction scheme decoders that use the mesh +// connectivity data. |MeshDataT| can be any class that provides the same +// interface as the PredictionSchemeMeshData class. 
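// A short usage sketch (not part of the Draco sources) of
// MeshPredictionSchemeData, assuming the template parameter stripped from this
// copy of the header is the corner table type and that the map element types
// are CornerIndex and int32_t, as described by the member comments above. The
// class is only a bundle of non-owning pointers that the prediction schemes
// read from.
#include <cstdint>
#include <vector>

inline draco::MeshPredictionSchemeData<draco::CornerTable> MakeMeshDataSketch(
    const draco::Mesh *mesh, const draco::CornerTable *corner_table,
    const std::vector<draco::CornerIndex> *data_to_corner_map,
    const std::vector<int32_t> *vertex_to_data_map) {
  draco::MeshPredictionSchemeData<draco::CornerTable> mesh_data;
  mesh_data.Set(mesh, corner_table, data_to_corner_map, vertex_to_data_map);
  // mesh_data.IsInitialized() is true only if all four pointers are non-null.
  return mesh_data;
}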
+template +class MeshPredictionSchemeDecoder + : public PredictionSchemeDecoder { + public: + typedef MeshDataT MeshData; + MeshPredictionSchemeDecoder(const PointAttribute *attribute, + const TransformT &transform, + const MeshDataT &mesh_data) + : PredictionSchemeDecoder(attribute, transform), + mesh_data_(mesh_data) {} + + protected: + const MeshData &mesh_data() const { return mesh_data_; } + + private: + MeshData mesh_data_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h new file mode 100644 index 000000000..ab3c81a39 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h @@ -0,0 +1,46 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_ENCODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_ENCODER_H_ + +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_data.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_encoder.h" + +namespace draco { + +// Base class for all mesh prediction scheme encoders that use the mesh +// connectivity data. |MeshDataT| can be any class that provides the same +// interface as the PredictionSchemeMeshData class. +template +class MeshPredictionSchemeEncoder + : public PredictionSchemeEncoder { + public: + typedef MeshDataT MeshData; + MeshPredictionSchemeEncoder(const PointAttribute *attribute, + const TransformT &transform, + const MeshDataT &mesh_data) + : PredictionSchemeEncoder(attribute, transform), + mesh_data_(mesh_data) {} + + protected: + const MeshData &mesh_data() const { return mesh_data_; } + + private: + MeshData mesh_data_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_decoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_decoder.h new file mode 100644 index 000000000..da1387a30 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_decoder.h @@ -0,0 +1,172 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_DECODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_DECODER_H_ + +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h" +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_area.h" +#include "draco/compression/bit_coders/rans_bit_decoder.h" +#include "draco/draco_features.h" + +namespace draco { + +// See MeshPredictionSchemeGeometricNormalEncoder for documentation. +template +class MeshPredictionSchemeGeometricNormalDecoder + : public MeshPredictionSchemeDecoder { + public: + using CorrType = typename MeshPredictionSchemeDecoder::CorrType; + MeshPredictionSchemeGeometricNormalDecoder(const PointAttribute *attribute, + const TransformT &transform, + const MeshDataT &mesh_data) + : MeshPredictionSchemeDecoder( + attribute, transform, mesh_data), + predictor_(mesh_data) {} + + private: + MeshPredictionSchemeGeometricNormalDecoder() {} + + public: + bool ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data, + int size, int num_components, + const PointIndex *entry_to_point_id_map) override; + + bool DecodePredictionData(DecoderBuffer *buffer) override; + + PredictionSchemeMethod GetPredictionMethod() const override { + return MESH_PREDICTION_GEOMETRIC_NORMAL; + } + + bool IsInitialized() const override { + if (!predictor_.IsInitialized()) { + return false; + } + if (!this->mesh_data().IsInitialized()) { + return false; + } + if (!octahedron_tool_box_.IsInitialized()) { + return false; + } + return true; + } + + int GetNumParentAttributes() const override { return 1; } + + GeometryAttribute::Type GetParentAttributeType(int i) const override { + DRACO_DCHECK_EQ(i, 0); + (void)i; + return GeometryAttribute::POSITION; + } + + bool SetParentAttribute(const PointAttribute *att) override { + if (att->attribute_type() != GeometryAttribute::POSITION) { + return false; // Invalid attribute type. + } + if (att->num_components() != 3) { + return false; // Currently works only for 3 component positions. + } + predictor_.SetPositionAttribute(*att); + return true; + } + void SetQuantizationBits(int q) { + octahedron_tool_box_.SetQuantizationBits(q); + } + + private: + MeshPredictionSchemeGeometricNormalPredictorArea + predictor_; + OctahedronToolBox octahedron_tool_box_; + RAnsBitDecoder flip_normal_bit_decoder_; +}; + +template +bool MeshPredictionSchemeGeometricNormalDecoder< + DataTypeT, TransformT, + MeshDataT>::ComputeOriginalValues(const CorrType *in_corr, + DataTypeT *out_data, int /* size */, + int num_components, + const PointIndex *entry_to_point_id_map) { + this->SetQuantizationBits(this->transform().quantization_bits()); + predictor_.SetEntryToPointIdMap(entry_to_point_id_map); + DRACO_DCHECK(this->IsInitialized()); + + // Expecting in_data in octahedral coordinates, i.e., portable attribute. 
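+  // (In the octahedral encoding the unit normal is unfolded onto a square and
+  // stored as a pair of quantized integers; the exact mapping is the one
+  // implemented by OctahedronToolBox. A loose sketch of the loop that follows,
+  // using the identifiers from this function as informal notation:
+  //
+  //   n_pred = predictor_.ComputePredictedValue(corner)      // integer 3D normal
+  //   if (flip bit read from flip_normal_bit_decoder_) n_pred = -n_pred;
+  //   (s, t) = IntegerVectorToQuantizedOctahedralCoords(n_pred)
+  //   out    = transform.ComputeOriginalValue((s, t), correction)
+  // )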
+ DRACO_DCHECK_EQ(num_components, 2); + + const int corner_map_size = + static_cast(this->mesh_data().data_to_corner_map()->size()); + + VectorD pred_normal_3d; + int32_t pred_normal_oct[2]; + + for (int data_id = 0; data_id < corner_map_size; ++data_id) { + const CornerIndex corner_id = + this->mesh_data().data_to_corner_map()->at(data_id); + predictor_.ComputePredictedValue(corner_id, pred_normal_3d.data()); + + // Compute predicted octahedral coordinates. + octahedron_tool_box_.CanonicalizeIntegerVector(pred_normal_3d.data()); + DRACO_DCHECK_EQ(pred_normal_3d.AbsSum(), + octahedron_tool_box_.center_value()); + if (flip_normal_bit_decoder_.DecodeNextBit()) { + pred_normal_3d = -pred_normal_3d; + } + octahedron_tool_box_.IntegerVectorToQuantizedOctahedralCoords( + pred_normal_3d.data(), pred_normal_oct, pred_normal_oct + 1); + + const int data_offset = data_id * 2; + this->transform().ComputeOriginalValue( + pred_normal_oct, in_corr + data_offset, out_data + data_offset); + } + flip_normal_bit_decoder_.EndDecoding(); + return true; +} + +template +bool MeshPredictionSchemeGeometricNormalDecoder< + DataTypeT, TransformT, MeshDataT>::DecodePredictionData(DecoderBuffer + *buffer) { + // Get data needed for transform + if (!this->transform().DecodeTransformData(buffer)) { + return false; + } + +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED + if (buffer->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 2)) { + uint8_t prediction_mode; + if (!buffer->Decode(&prediction_mode)) { + return false; + } + + if (!predictor_.SetNormalPredictionMode( + NormalPredictionMode(prediction_mode))) { + return false; + } + } +#endif + + // Init normal flips. + if (!flip_normal_bit_decoder_.StartDecoding(buffer)) { + return false; + } + + return true; +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_encoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_encoder.h new file mode 100644 index 000000000..cf146f83a --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_encoder.h @@ -0,0 +1,180 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_ENCODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_ENCODER_H_ + +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h" +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_area.h" +#include "draco/compression/bit_coders/rans_bit_encoder.h" +#include "draco/compression/config/compression_shared.h" + +namespace draco { + +// Prediction scheme for normals based on the underlying geometry. +// At a smooth vertices normals are computed by weighting the normals of +// adjacent faces with the area of these faces. At seams, the same approach +// applies for seam corners. +template +class MeshPredictionSchemeGeometricNormalEncoder + : public MeshPredictionSchemeEncoder { + public: + using CorrType = typename MeshPredictionSchemeEncoder::CorrType; + MeshPredictionSchemeGeometricNormalEncoder(const PointAttribute *attribute, + const TransformT &transform, + const MeshDataT &mesh_data) + : MeshPredictionSchemeEncoder( + attribute, transform, mesh_data), + predictor_(mesh_data) {} + + bool ComputeCorrectionValues( + const DataTypeT *in_data, CorrType *out_corr, int size, + int num_components, const PointIndex *entry_to_point_id_map) override; + + bool EncodePredictionData(EncoderBuffer *buffer) override; + + PredictionSchemeMethod GetPredictionMethod() const override { + return MESH_PREDICTION_GEOMETRIC_NORMAL; + } + + bool IsInitialized() const override { + if (!predictor_.IsInitialized()) { + return false; + } + if (!this->mesh_data().IsInitialized()) { + return false; + } + return true; + } + + int GetNumParentAttributes() const override { return 1; } + + GeometryAttribute::Type GetParentAttributeType(int i) const override { + DRACO_DCHECK_EQ(i, 0); + (void)i; + return GeometryAttribute::POSITION; + } + + bool SetParentAttribute(const PointAttribute *att) override { + if (att->attribute_type() != GeometryAttribute::POSITION) { + return false; // Invalid attribute type. + } + if (att->num_components() != 3) { + return false; // Currently works only for 3 component positions. + } + predictor_.SetPositionAttribute(*att); + return true; + } + + private: + void SetQuantizationBits(int q) { + DRACO_DCHECK_GE(q, 2); + DRACO_DCHECK_LE(q, 30); + octahedron_tool_box_.SetQuantizationBits(q); + } + MeshPredictionSchemeGeometricNormalPredictorArea + predictor_; + + OctahedronToolBox octahedron_tool_box_; + RAnsBitEncoder flip_normal_bit_encoder_; +}; + +template +bool MeshPredictionSchemeGeometricNormalEncoder:: + ComputeCorrectionValues(const DataTypeT *in_data, CorrType *out_corr, + int size, int num_components, + const PointIndex *entry_to_point_id_map) { + this->SetQuantizationBits(this->transform().quantization_bits()); + predictor_.SetEntryToPointIdMap(entry_to_point_id_map); + DRACO_DCHECK(this->IsInitialized()); + // Expecting in_data in octahedral coordinates, i.e., portable attribute. 
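+  // (For every entry the loop below quantizes both the predicted normal and
+  // its negation to octahedral (s, t) pairs, computes a correction against the
+  // actual value for each, and keeps whichever correction has the smaller
+  // AbsSum(), recording the choice as one bit in flip_normal_bit_encoder_.
+  // Rough illustration with made-up numbers: if the corrections were (+3, -2)
+  // for +n and (-40, +51) for -n, the +n branch wins and a "no flip" bit is
+  // written.)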
+ DRACO_DCHECK_EQ(num_components, 2); + + flip_normal_bit_encoder_.StartEncoding(); + + const int corner_map_size = + static_cast(this->mesh_data().data_to_corner_map()->size()); + + VectorD pred_normal_3d; + VectorD pos_pred_normal_oct; + VectorD neg_pred_normal_oct; + VectorD pos_correction; + VectorD neg_correction; + for (int data_id = 0; data_id < corner_map_size; ++data_id) { + const CornerIndex corner_id = + this->mesh_data().data_to_corner_map()->at(data_id); + predictor_.ComputePredictedValue(corner_id, pred_normal_3d.data()); + + // Compute predicted octahedral coordinates. + octahedron_tool_box_.CanonicalizeIntegerVector(pred_normal_3d.data()); + DRACO_DCHECK_EQ(pred_normal_3d.AbsSum(), + octahedron_tool_box_.center_value()); + + // Compute octahedral coordinates for both possible directions. + octahedron_tool_box_.IntegerVectorToQuantizedOctahedralCoords( + pred_normal_3d.data(), pos_pred_normal_oct.data(), + pos_pred_normal_oct.data() + 1); + pred_normal_3d = -pred_normal_3d; + octahedron_tool_box_.IntegerVectorToQuantizedOctahedralCoords( + pred_normal_3d.data(), neg_pred_normal_oct.data(), + neg_pred_normal_oct.data() + 1); + + // Choose the one with the best correction value. + const int data_offset = data_id * 2; + this->transform().ComputeCorrection(in_data + data_offset, + pos_pred_normal_oct.data(), + pos_correction.data()); + this->transform().ComputeCorrection(in_data + data_offset, + neg_pred_normal_oct.data(), + neg_correction.data()); + pos_correction[0] = octahedron_tool_box_.ModMax(pos_correction[0]); + pos_correction[1] = octahedron_tool_box_.ModMax(pos_correction[1]); + neg_correction[0] = octahedron_tool_box_.ModMax(neg_correction[0]); + neg_correction[1] = octahedron_tool_box_.ModMax(neg_correction[1]); + if (pos_correction.AbsSum() < neg_correction.AbsSum()) { + flip_normal_bit_encoder_.EncodeBit(false); + (out_corr + data_offset)[0] = + octahedron_tool_box_.MakePositive(pos_correction[0]); + (out_corr + data_offset)[1] = + octahedron_tool_box_.MakePositive(pos_correction[1]); + } else { + flip_normal_bit_encoder_.EncodeBit(true); + (out_corr + data_offset)[0] = + octahedron_tool_box_.MakePositive(neg_correction[0]); + (out_corr + data_offset)[1] = + octahedron_tool_box_.MakePositive(neg_correction[1]); + } + } + return true; +} + +template +bool MeshPredictionSchemeGeometricNormalEncoder< + DataTypeT, TransformT, MeshDataT>::EncodePredictionData(EncoderBuffer + *buffer) { + if (!this->transform().EncodeTransformData(buffer)) { + return false; + } + + // Encode normal flips. + flip_normal_bit_encoder_.EndEncoding(buffer); + return true; +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_area.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_area.h new file mode 100644 index 000000000..775eded6b --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_area.h @@ -0,0 +1,117 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_PREDICTOR_AREA_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_PREDICTOR_AREA_H_ + +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_base.h" + +namespace draco { + +// This predictor estimates the normal via the surrounding triangles of the +// given corner. Triangles are weighted according to their area. +template +class MeshPredictionSchemeGeometricNormalPredictorArea + : public MeshPredictionSchemeGeometricNormalPredictorBase< + DataTypeT, TransformT, MeshDataT> { + typedef MeshPredictionSchemeGeometricNormalPredictorBase< + DataTypeT, TransformT, MeshDataT> + Base; + + public: + explicit MeshPredictionSchemeGeometricNormalPredictorArea(const MeshDataT &md) + : Base(md) { + this->SetNormalPredictionMode(TRIANGLE_AREA); + }; + virtual ~MeshPredictionSchemeGeometricNormalPredictorArea() {} + + // Computes predicted octahedral coordinates on a given corner. + void ComputePredictedValue(CornerIndex corner_id, + DataTypeT *prediction) override { + DRACO_DCHECK(this->IsInitialized()); + typedef typename MeshDataT::CornerTable CornerTable; + const CornerTable *const corner_table = this->mesh_data_.corner_table(); + // Going to compute the predicted normal from the surrounding triangles + // according to the connectivity of the given corner table. + VertexCornersIterator cit(corner_table, corner_id); + // Position of central vertex does not change in loop. + const VectorD pos_cent = this->GetPositionForCorner(corner_id); + // Computing normals for triangles and adding them up. + + VectorD normal; + CornerIndex c_next, c_prev; + while (!cit.End()) { + // Getting corners. + if (this->normal_prediction_mode_ == ONE_TRIANGLE) { + c_next = corner_table->Next(corner_id); + c_prev = corner_table->Previous(corner_id); + } else { + c_next = corner_table->Next(cit.Corner()); + c_prev = corner_table->Previous(cit.Corner()); + } + const VectorD pos_next = this->GetPositionForCorner(c_next); + const VectorD pos_prev = this->GetPositionForCorner(c_prev); + + // Computing delta vectors to next and prev. + const VectorD delta_next = pos_next - pos_cent; + const VectorD delta_prev = pos_prev - pos_cent; + + // Computing cross product. + const VectorD cross = CrossProduct(delta_next, delta_prev); + + // Prevent signed integer overflows by doing math as unsigned. + auto normal_data = reinterpret_cast(normal.data()); + auto cross_data = reinterpret_cast(cross.data()); + normal_data[0] = normal_data[0] + cross_data[0]; + normal_data[1] = normal_data[1] + cross_data[1]; + normal_data[2] = normal_data[2] + cross_data[2]; + + cit.Next(); + } + + // Convert to int32_t, make sure entries are not too large. 
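+    // (The accumulated normal is kept in 64-bit components; before casting it
+    // down, the code below rescales it so that |x| + |y| + |z| stays within
+    // 2^29. Illustrative arithmetic: if AbsSum() were 8 * 2^29, the integer
+    // quotient is 8 and every component is divided by 8, bringing the sum back
+    // to roughly 2^29 while preserving the direction.)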
+ constexpr int64_t upper_bound = 1 << 29; + if (this->normal_prediction_mode_ == ONE_TRIANGLE) { + const int32_t abs_sum = static_cast(normal.AbsSum()); + if (abs_sum > upper_bound) { + const int64_t quotient = abs_sum / upper_bound; + normal = normal / quotient; + } + } else { + const int64_t abs_sum = normal.AbsSum(); + if (abs_sum > upper_bound) { + const int64_t quotient = abs_sum / upper_bound; + normal = normal / quotient; + } + } + DRACO_DCHECK_LE(normal.AbsSum(), upper_bound); + prediction[0] = static_cast(normal[0]); + prediction[1] = static_cast(normal[1]); + prediction[2] = static_cast(normal[2]); + } + bool SetNormalPredictionMode(NormalPredictionMode mode) override { + if (mode == ONE_TRIANGLE) { + this->normal_prediction_mode_ = mode; + return true; + } else if (mode == TRIANGLE_AREA) { + this->normal_prediction_mode_ = mode; + return true; + } + return false; + } +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_PREDICTOR_AREA_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_base.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_base.h new file mode 100644 index 000000000..a554dda96 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_base.h @@ -0,0 +1,96 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_PREDICTOR_BASE_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_PREDICTOR_BASE_H_ + +#include + +#include "draco/attributes/point_attribute.h" +#include "draco/compression/attributes/normal_compression_utils.h" +#include "draco/compression/config/compression_shared.h" +#include "draco/core/math_utils.h" +#include "draco/core/vector_d.h" +#include "draco/mesh/corner_table.h" +#include "draco/mesh/corner_table_iterators.h" + +namespace draco { + +// Base class for geometric normal predictors using position attribute. 
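+//
+// The helpers below resolve a corner to an integer position through a chain of
+// lookups (names as used in this class):
+//
+//   corner -> corner_table->Vertex(corner)             // vertex id
+//          -> mesh_data_.vertex_to_data_map()->at(v)   // attribute entry id
+//          -> entry_to_point_id_map_[data_id]          // point id
+//          -> pos_attribute_->mapped_index(point_id)   // attribute value id
+//          -> ConvertValue(...) into an integer 3D position.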
+template +class MeshPredictionSchemeGeometricNormalPredictorBase { + protected: + explicit MeshPredictionSchemeGeometricNormalPredictorBase(const MeshDataT &md) + : pos_attribute_(nullptr), + entry_to_point_id_map_(nullptr), + mesh_data_(md) {} + virtual ~MeshPredictionSchemeGeometricNormalPredictorBase() {} + + public: + void SetPositionAttribute(const PointAttribute &position_attribute) { + pos_attribute_ = &position_attribute; + } + void SetEntryToPointIdMap(const PointIndex *map) { + entry_to_point_id_map_ = map; + } + bool IsInitialized() const { + if (pos_attribute_ == nullptr) { + return false; + } + if (entry_to_point_id_map_ == nullptr) { + return false; + } + return true; + } + + virtual bool SetNormalPredictionMode(NormalPredictionMode mode) = 0; + virtual NormalPredictionMode GetNormalPredictionMode() const { + return normal_prediction_mode_; + } + + protected: + VectorD GetPositionForDataId(int data_id) const { + DRACO_DCHECK(this->IsInitialized()); + const auto point_id = entry_to_point_id_map_[data_id]; + const auto pos_val_id = pos_attribute_->mapped_index(point_id); + VectorD pos; + pos_attribute_->ConvertValue(pos_val_id, &pos[0]); + return pos; + } + VectorD GetPositionForCorner(CornerIndex ci) const { + DRACO_DCHECK(this->IsInitialized()); + const auto corner_table = mesh_data_.corner_table(); + const auto vert_id = corner_table->Vertex(ci).value(); + const auto data_id = mesh_data_.vertex_to_data_map()->at(vert_id); + return GetPositionForDataId(data_id); + } + VectorD GetOctahedralCoordForDataId(int data_id, + const DataTypeT *data) const { + DRACO_DCHECK(this->IsInitialized()); + const int data_offset = data_id * 2; + return VectorD(data[data_offset], data[data_offset + 1]); + } + // Computes predicted octahedral coordinates on a given corner. + virtual void ComputePredictedValue(CornerIndex corner_id, + DataTypeT *prediction) = 0; + + const PointAttribute *pos_attribute_; + const PointIndex *entry_to_point_id_map_; + MeshDataT mesh_data_; + NormalPredictionMode normal_prediction_mode_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_PREDICTOR_BASE_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_decoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_decoder.h new file mode 100644 index 000000000..fc82e0a8f --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_decoder.h @@ -0,0 +1,126 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_MULTI_PARALLELOGRAM_DECODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_MULTI_PARALLELOGRAM_DECODER_H_ + +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h" +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h" +#include "draco/draco_features.h" + +namespace draco { + +// Decoder for predictions encoded by multi-parallelogram encoding scheme. +// See the corresponding encoder for method description. +template +class MeshPredictionSchemeMultiParallelogramDecoder + : public MeshPredictionSchemeDecoder { + public: + using CorrType = + typename PredictionSchemeDecoder::CorrType; + using CornerTable = typename MeshDataT::CornerTable; + + explicit MeshPredictionSchemeMultiParallelogramDecoder( + const PointAttribute *attribute) + : MeshPredictionSchemeDecoder( + attribute) {} + MeshPredictionSchemeMultiParallelogramDecoder(const PointAttribute *attribute, + const TransformT &transform, + const MeshDataT &mesh_data) + : MeshPredictionSchemeDecoder( + attribute, transform, mesh_data) {} + + bool ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data, + int size, int num_components, + const PointIndex *entry_to_point_id_map) override; + PredictionSchemeMethod GetPredictionMethod() const override { + return MESH_PREDICTION_MULTI_PARALLELOGRAM; + } + + bool IsInitialized() const override { + return this->mesh_data().IsInitialized(); + } +}; + +template +bool MeshPredictionSchemeMultiParallelogramDecoder:: + ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data, + int /* size */, int num_components, + const PointIndex * /* entry_to_point_id_map */) { + this->transform().Init(num_components); + + // For storage of prediction values (already initialized to zero). + std::unique_ptr pred_vals(new DataTypeT[num_components]()); + std::unique_ptr parallelogram_pred_vals( + new DataTypeT[num_components]()); + + this->transform().ComputeOriginalValue(pred_vals.get(), in_corr, out_data); + + const CornerTable *const table = this->mesh_data().corner_table(); + const std::vector *const vertex_to_data_map = + this->mesh_data().vertex_to_data_map(); + + const int corner_map_size = + static_cast(this->mesh_data().data_to_corner_map()->size()); + for (int p = 1; p < corner_map_size; ++p) { + const CornerIndex start_corner_id = + this->mesh_data().data_to_corner_map()->at(p); + + CornerIndex corner_id(start_corner_id); + int num_parallelograms = 0; + for (int i = 0; i < num_components; ++i) { + pred_vals[i] = static_cast(0); + } + while (corner_id != kInvalidCornerIndex) { + if (ComputeParallelogramPrediction( + p, corner_id, table, *vertex_to_data_map, out_data, + num_components, parallelogram_pred_vals.get())) { + for (int c = 0; c < num_components; ++c) { + pred_vals[c] += parallelogram_pred_vals[c]; + } + ++num_parallelograms; + } + + // Proceed to the next corner attached to the vertex. + corner_id = table->SwingRight(corner_id); + if (corner_id == start_corner_id) { + corner_id = kInvalidCornerIndex; + } + } + + const int dst_offset = p * num_components; + if (num_parallelograms == 0) { + // No parallelogram was valid. + // We use the last decoded point as a reference. 
+ const int src_offset = (p - 1) * num_components; + this->transform().ComputeOriginalValue( + out_data + src_offset, in_corr + dst_offset, out_data + dst_offset); + } else { + // Compute the correction from the predicted value. + for (int c = 0; c < num_components; ++c) { + pred_vals[c] /= num_parallelograms; + } + this->transform().ComputeOriginalValue( + pred_vals.get(), in_corr + dst_offset, out_data + dst_offset); + } + } + return true; +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_MULTI_PARALLELOGRAM_DECODER_H_ +#endif diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_encoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_encoder.h new file mode 100644 index 000000000..301b357d4 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_encoder.h @@ -0,0 +1,133 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_MULTI_PARALLELOGRAM_ENCODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_MULTI_PARALLELOGRAM_ENCODER_H_ + +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h" +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h" + +namespace draco { + +// Multi parallelogram prediction predicts attribute values using information +// from all opposite faces to the predicted vertex, compared to the standard +// prediction scheme, where only one opposite face is used (see +// prediction_scheme_parallelogram.h). This approach is generally slower than +// the standard parallelogram prediction, but it usually results in better +// prediction (5 - 20% based on the quantization level. Better gains can be +// achieved when more aggressive quantization is used). 
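+//
+// Sketch of the prediction itself (informal notation; p_1 .. p_k are the
+// single-parallelogram predictions obtained from the opposite faces around the
+// predicted vertex):
+//
+//   pred = (p_1 + p_2 + ... + p_k) / k    // integer division per component
+//
+// When k == 0 (no usable opposite face) the scheme falls back to delta coding
+// against the previously encoded entry, exactly as in the code below.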
+template +class MeshPredictionSchemeMultiParallelogramEncoder + : public MeshPredictionSchemeEncoder { + public: + using CorrType = + typename PredictionSchemeEncoder::CorrType; + using CornerTable = typename MeshDataT::CornerTable; + + explicit MeshPredictionSchemeMultiParallelogramEncoder( + const PointAttribute *attribute) + : MeshPredictionSchemeEncoder( + attribute) {} + MeshPredictionSchemeMultiParallelogramEncoder(const PointAttribute *attribute, + const TransformT &transform, + const MeshDataT &mesh_data) + : MeshPredictionSchemeEncoder( + attribute, transform, mesh_data) {} + + bool ComputeCorrectionValues( + const DataTypeT *in_data, CorrType *out_corr, int size, + int num_components, const PointIndex *entry_to_point_id_map) override; + PredictionSchemeMethod GetPredictionMethod() const override { + return MESH_PREDICTION_MULTI_PARALLELOGRAM; + } + + bool IsInitialized() const override { + return this->mesh_data().IsInitialized(); + } +}; + +template +bool MeshPredictionSchemeMultiParallelogramEncoder:: + ComputeCorrectionValues(const DataTypeT *in_data, CorrType *out_corr, + int size, int num_components, + const PointIndex * /* entry_to_point_id_map */) { + this->transform().Init(in_data, size, num_components); + const CornerTable *const table = this->mesh_data().corner_table(); + const std::vector *const vertex_to_data_map = + this->mesh_data().vertex_to_data_map(); + + // For storage of prediction values (already initialized to zero). + std::unique_ptr pred_vals(new DataTypeT[num_components]()); + std::unique_ptr parallelogram_pred_vals( + new DataTypeT[num_components]()); + + // We start processing from the end because this prediction uses data from + // previous entries that could be overwritten when an entry is processed. + for (int p = + static_cast(this->mesh_data().data_to_corner_map()->size() - 1); + p > 0; --p) { + const CornerIndex start_corner_id = + this->mesh_data().data_to_corner_map()->at(p); + + // Go over all corners attached to the vertex and compute the predicted + // value from the parallelograms defined by their opposite faces. + CornerIndex corner_id(start_corner_id); + int num_parallelograms = 0; + for (int i = 0; i < num_components; ++i) { + pred_vals[i] = static_cast(0); + } + while (corner_id != kInvalidCornerIndex) { + if (ComputeParallelogramPrediction( + p, corner_id, table, *vertex_to_data_map, in_data, num_components, + parallelogram_pred_vals.get())) { + for (int c = 0; c < num_components; ++c) { + pred_vals[c] += parallelogram_pred_vals[c]; + } + ++num_parallelograms; + } + + // Proceed to the next corner attached to the vertex. + corner_id = table->SwingRight(corner_id); + if (corner_id == start_corner_id) { + corner_id = kInvalidCornerIndex; + } + } + const int dst_offset = p * num_components; + if (num_parallelograms == 0) { + // No parallelogram was valid. + // We use the last encoded point as a reference. + const int src_offset = (p - 1) * num_components; + this->transform().ComputeCorrection( + in_data + dst_offset, in_data + src_offset, out_corr + dst_offset); + } else { + // Compute the correction from the predicted value. + for (int c = 0; c < num_components; ++c) { + pred_vals[c] /= num_parallelograms; + } + this->transform().ComputeCorrection(in_data + dst_offset, pred_vals.get(), + out_corr + dst_offset); + } + } + // First element is always fixed because it cannot be predicted. 
+ for (int i = 0; i < num_components; ++i) { + pred_vals[i] = static_cast(0); + } + this->transform().ComputeCorrection(in_data, pred_vals.get(), out_corr); + return true; +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_MULTI_PARALLELOGRAM_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_decoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_decoder.h new file mode 100644 index 000000000..4d47ddf30 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_decoder.h @@ -0,0 +1,98 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_DECODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_DECODER_H_ + +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h" +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h" + +namespace draco { + +// Decoder for attribute values encoded with the standard parallelogram +// prediction. See the description of the corresponding encoder for more +// details. +template +class MeshPredictionSchemeParallelogramDecoder + : public MeshPredictionSchemeDecoder { + public: + using CorrType = + typename PredictionSchemeDecoder::CorrType; + using CornerTable = typename MeshDataT::CornerTable; + explicit MeshPredictionSchemeParallelogramDecoder( + const PointAttribute *attribute) + : MeshPredictionSchemeDecoder( + attribute) {} + MeshPredictionSchemeParallelogramDecoder(const PointAttribute *attribute, + const TransformT &transform, + const MeshDataT &mesh_data) + : MeshPredictionSchemeDecoder( + attribute, transform, mesh_data) {} + + bool ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data, + int size, int num_components, + const PointIndex *entry_to_point_id_map) override; + PredictionSchemeMethod GetPredictionMethod() const override { + return MESH_PREDICTION_PARALLELOGRAM; + } + + bool IsInitialized() const override { + return this->mesh_data().IsInitialized(); + } +}; + +template +bool MeshPredictionSchemeParallelogramDecoder:: + ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data, + int /* size */, int num_components, + const PointIndex * /* entry_to_point_id_map */) { + this->transform().Init(num_components); + + const CornerTable *const table = this->mesh_data().corner_table(); + const std::vector *const vertex_to_data_map = + this->mesh_data().vertex_to_data_map(); + + // For storage of prediction values (already initialized to zero). + std::unique_ptr pred_vals(new DataTypeT[num_components]()); + + // Restore the first value. 
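+  // (The first entry has no previously decoded neighbours, so a zero
+  // prediction is used and the stored correction alone reproduces it; for
+  // example, with a plain non-wrapping difference transform this amounts to
+  // out_data[c] = 0 + in_corr[c].)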
+ this->transform().ComputeOriginalValue(pred_vals.get(), in_corr, out_data); + + const int corner_map_size = + static_cast(this->mesh_data().data_to_corner_map()->size()); + for (int p = 1; p < corner_map_size; ++p) { + const CornerIndex corner_id = this->mesh_data().data_to_corner_map()->at(p); + const int dst_offset = p * num_components; + if (!ComputeParallelogramPrediction(p, corner_id, table, + *vertex_to_data_map, out_data, + num_components, pred_vals.get())) { + // Parallelogram could not be computed, Possible because some of the + // vertices are not valid (not encoded yet). + // We use the last encoded point as a reference (delta coding). + const int src_offset = (p - 1) * num_components; + this->transform().ComputeOriginalValue( + out_data + src_offset, in_corr + dst_offset, out_data + dst_offset); + } else { + // Apply the parallelogram prediction. + this->transform().ComputeOriginalValue( + pred_vals.get(), in_corr + dst_offset, out_data + dst_offset); + } + } + return true; +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_encoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_encoder.h new file mode 100644 index 000000000..f00801932 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_encoder.h @@ -0,0 +1,111 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_ENCODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_ENCODER_H_ + +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h" +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h" + +namespace draco { + +// Parallelogram prediction predicts an attribute value V from three vertices +// on the opposite face to the predicted vertex. 
The values on the three +// vertices are used to construct a parallelogram V' = O - A - B, where O is the +// value on the opposite vertex, and A, B are values on the shared vertices: +// V +// / \ +// / \ +// / \ +// A-------B +// \ / +// \ / +// \ / +// O +// +template +class MeshPredictionSchemeParallelogramEncoder + : public MeshPredictionSchemeEncoder { + public: + using CorrType = + typename PredictionSchemeEncoder::CorrType; + using CornerTable = typename MeshDataT::CornerTable; + explicit MeshPredictionSchemeParallelogramEncoder( + const PointAttribute *attribute) + : MeshPredictionSchemeEncoder( + attribute) {} + MeshPredictionSchemeParallelogramEncoder(const PointAttribute *attribute, + const TransformT &transform, + const MeshDataT &mesh_data) + : MeshPredictionSchemeEncoder( + attribute, transform, mesh_data) {} + + bool ComputeCorrectionValues( + const DataTypeT *in_data, CorrType *out_corr, int size, + int num_components, const PointIndex *entry_to_point_id_map) override; + PredictionSchemeMethod GetPredictionMethod() const override { + return MESH_PREDICTION_PARALLELOGRAM; + } + + bool IsInitialized() const override { + return this->mesh_data().IsInitialized(); + } +}; + +template +bool MeshPredictionSchemeParallelogramEncoder:: + ComputeCorrectionValues(const DataTypeT *in_data, CorrType *out_corr, + int size, int num_components, + const PointIndex * /* entry_to_point_id_map */) { + this->transform().Init(in_data, size, num_components); + // For storage of prediction values (already initialized to zero). + std::unique_ptr pred_vals(new DataTypeT[num_components]()); + + // We start processing from the end because this prediction uses data from + // previous entries that could be overwritten when an entry is processed. + const CornerTable *const table = this->mesh_data().corner_table(); + const std::vector *const vertex_to_data_map = + this->mesh_data().vertex_to_data_map(); + for (int p = + static_cast(this->mesh_data().data_to_corner_map()->size() - 1); + p > 0; --p) { + const CornerIndex corner_id = this->mesh_data().data_to_corner_map()->at(p); + const int dst_offset = p * num_components; + if (!ComputeParallelogramPrediction(p, corner_id, table, + *vertex_to_data_map, in_data, + num_components, pred_vals.get())) { + // Parallelogram could not be computed, Possible because some of the + // vertices are not valid (not encoded yet). + // We use the last encoded point as a reference (delta coding). + const int src_offset = (p - 1) * num_components; + this->transform().ComputeCorrection( + in_data + dst_offset, in_data + src_offset, out_corr + dst_offset); + } else { + // Apply the parallelogram prediction. + this->transform().ComputeCorrection(in_data + dst_offset, pred_vals.get(), + out_corr + dst_offset); + } + } + // First element is always fixed because it cannot be predicted. 
+ for (int i = 0; i < num_components; ++i) { + pred_vals[i] = static_cast(0); + } + this->transform().ComputeCorrection(in_data, pred_vals.get(), out_corr); + return true; +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h new file mode 100644 index 000000000..fd10fb524 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h @@ -0,0 +1,78 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Shared functionality for different parallelogram prediction schemes. + +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_SHARED_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_SHARED_H_ + +#include "draco/mesh/corner_table.h" +#include "draco/mesh/mesh.h" + +namespace draco { + +// TODO(draco-eng) consolidate Vertex/next/previous queries to one call +// (performance). +template +inline void GetParallelogramEntries( + const CornerIndex ci, const CornerTableT *table, + const std::vector &vertex_to_data_map, int *opp_entry, + int *next_entry, int *prev_entry) { + // One vertex of the input |table| correspond to exactly one attribute value + // entry. The |table| can be either CornerTable for per-vertex attributes, + // or MeshAttributeCornerTable for attributes with interior seams. + *opp_entry = vertex_to_data_map[table->Vertex(ci).value()]; + *next_entry = vertex_to_data_map[table->Vertex(table->Next(ci)).value()]; + *prev_entry = vertex_to_data_map[table->Vertex(table->Previous(ci)).value()]; +} + +// Computes parallelogram prediction for a given corner and data entry id. +// The prediction is stored in |out_prediction|. +// Function returns false when the prediction couldn't be computed, e.g. because +// not all entry points were available. +template +inline bool ComputeParallelogramPrediction( + int data_entry_id, const CornerIndex ci, const CornerTableT *table, + const std::vector &vertex_to_data_map, const DataTypeT *in_data, + int num_components, DataTypeT *out_prediction) { + const CornerIndex oci = table->Opposite(ci); + if (oci == kInvalidCornerIndex) { + return false; + } + int vert_opp, vert_next, vert_prev; + GetParallelogramEntries(oci, table, vertex_to_data_map, + &vert_opp, &vert_next, &vert_prev); + if (vert_opp < data_entry_id && vert_next < data_entry_id && + vert_prev < data_entry_id) { + // Apply the parallelogram prediction. 
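+    // (Per component the predicted value is next + prev - opp, i.e. the fourth
+    // corner of the parallelogram spanned by the three already-known entries.
+    // Tiny made-up example for one component: next = 10, prev = 14, opp = 9
+    // gives a prediction of 10 + 14 - 9 = 15.)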
+ const int v_opp_off = vert_opp * num_components; + const int v_next_off = vert_next * num_components; + const int v_prev_off = vert_prev * num_components; + for (int c = 0; c < num_components; ++c) { + const int64_t in_data_next_off = in_data[v_next_off + c]; + const int64_t in_data_prev_off = in_data[v_prev_off + c]; + const int64_t in_data_opp_off = in_data[v_opp_off + c]; + const int64_t result = + (in_data_next_off + in_data_prev_off) - in_data_opp_off; + + out_prediction[c] = static_cast(result); + } + return true; + } + return false; // Not all data is available for prediction +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_SHARED_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_decoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_decoder.h new file mode 100644 index 000000000..02cf7e60f --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_decoder.h @@ -0,0 +1,344 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_DECODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_DECODER_H_ + +#include + +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h" +#include "draco/compression/bit_coders/rans_bit_decoder.h" +#include "draco/core/varint_decoding.h" +#include "draco/core/vector_d.h" +#include "draco/draco_features.h" +#include "draco/mesh/corner_table.h" + +namespace draco { + +// Decoder for predictions of UV coordinates encoded by our specialized texture +// coordinate predictor. See the corresponding encoder for more details. Note +// that this predictor is not portable and should not be used anymore. See +// MeshPredictionSchemeTexCoordsPortableEncoder/Decoder for a portable version +// of this prediction scheme. 
+template +class MeshPredictionSchemeTexCoordsDecoder + : public MeshPredictionSchemeDecoder { + public: + using CorrType = typename MeshPredictionSchemeDecoder::CorrType; + MeshPredictionSchemeTexCoordsDecoder(const PointAttribute *attribute, + const TransformT &transform, + const MeshDataT &mesh_data, int version) + : MeshPredictionSchemeDecoder( + attribute, transform, mesh_data), + pos_attribute_(nullptr), + entry_to_point_id_map_(nullptr), + num_components_(0), + version_(version) {} + + bool ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data, + int size, int num_components, + const PointIndex *entry_to_point_id_map) override; + + bool DecodePredictionData(DecoderBuffer *buffer) override; + + PredictionSchemeMethod GetPredictionMethod() const override { + return MESH_PREDICTION_TEX_COORDS_DEPRECATED; + } + + bool IsInitialized() const override { + if (pos_attribute_ == nullptr) { + return false; + } + if (!this->mesh_data().IsInitialized()) { + return false; + } + return true; + } + + int GetNumParentAttributes() const override { return 1; } + + GeometryAttribute::Type GetParentAttributeType(int i) const override { + DRACO_DCHECK_EQ(i, 0); + (void)i; + return GeometryAttribute::POSITION; + } + + bool SetParentAttribute(const PointAttribute *att) override { + if (att == nullptr) { + return false; + } + if (att->attribute_type() != GeometryAttribute::POSITION) { + return false; // Invalid attribute type. + } + if (att->num_components() != 3) { + return false; // Currently works only for 3 component positions. + } + pos_attribute_ = att; + return true; + } + + protected: + Vector3f GetPositionForEntryId(int entry_id) const { + const PointIndex point_id = entry_to_point_id_map_[entry_id]; + Vector3f pos; + pos_attribute_->ConvertValue(pos_attribute_->mapped_index(point_id), + &pos[0]); + return pos; + } + + Vector2f GetTexCoordForEntryId(int entry_id, const DataTypeT *data) const { + const int data_offset = entry_id * num_components_; + return Vector2f(static_cast(data[data_offset]), + static_cast(data[data_offset + 1])); + } + + void ComputePredictedValue(CornerIndex corner_id, const DataTypeT *data, + int data_id); + + private: + const PointAttribute *pos_attribute_; + const PointIndex *entry_to_point_id_map_; + std::unique_ptr predicted_value_; + int num_components_; + // Encoded / decoded array of UV flips. + std::vector orientations_; + int version_; +}; + +template +bool MeshPredictionSchemeTexCoordsDecoder:: + ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data, + int /* size */, int num_components, + const PointIndex *entry_to_point_id_map) { + num_components_ = num_components; + entry_to_point_id_map_ = entry_to_point_id_map; + predicted_value_ = + std::unique_ptr(new DataTypeT[num_components]); + this->transform().Init(num_components); + + const int corner_map_size = + static_cast(this->mesh_data().data_to_corner_map()->size()); + for (int p = 0; p < corner_map_size; ++p) { + const CornerIndex corner_id = this->mesh_data().data_to_corner_map()->at(p); + ComputePredictedValue(corner_id, out_data, p); + + const int dst_offset = p * num_components; + this->transform().ComputeOriginalValue( + predicted_value_.get(), in_corr + dst_offset, out_data + dst_offset); + } + return true; +} + +template +bool MeshPredictionSchemeTexCoordsDecoder:: + DecodePredictionData(DecoderBuffer *buffer) { + // Decode the delta coded orientations. 
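+  // (Each decoded bit means "same orientation as the previous entry", starting
+  // from an implicit initial value of true. Illustrative run: the bit sequence
+  // 1, 1, 0, 1, 0 expands to the orientations true, true, false, false, true.)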
+ uint32_t num_orientations = 0; + if (buffer->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 2)) { + if (!buffer->Decode(&num_orientations)) { + return false; + } + } else { + if (!DecodeVarint(&num_orientations, buffer)) { + return false; + } + } + if (num_orientations == 0) { + return false; + } + orientations_.resize(num_orientations); + bool last_orientation = true; + RAnsBitDecoder decoder; + if (!decoder.StartDecoding(buffer)) { + return false; + } + for (uint32_t i = 0; i < num_orientations; ++i) { + if (!decoder.DecodeNextBit()) { + last_orientation = !last_orientation; + } + orientations_[i] = last_orientation; + } + decoder.EndDecoding(); + return MeshPredictionSchemeDecoder::DecodePredictionData(buffer); +} + +template +void MeshPredictionSchemeTexCoordsDecoder:: + ComputePredictedValue(CornerIndex corner_id, const DataTypeT *data, + int data_id) { + // Compute the predicted UV coordinate from the positions on all corners + // of the processed triangle. For the best prediction, the UV coordinates + // on the next/previous corners need to be already encoded/decoded. + const CornerIndex next_corner_id = + this->mesh_data().corner_table()->Next(corner_id); + const CornerIndex prev_corner_id = + this->mesh_data().corner_table()->Previous(corner_id); + // Get the encoded data ids from the next and previous corners. + // The data id is the encoding order of the UV coordinates. + int next_data_id, prev_data_id; + + int next_vert_id, prev_vert_id; + next_vert_id = + this->mesh_data().corner_table()->Vertex(next_corner_id).value(); + prev_vert_id = + this->mesh_data().corner_table()->Vertex(prev_corner_id).value(); + + next_data_id = this->mesh_data().vertex_to_data_map()->at(next_vert_id); + prev_data_id = this->mesh_data().vertex_to_data_map()->at(prev_vert_id); + + if (prev_data_id < data_id && next_data_id < data_id) { + // Both other corners have available UV coordinates for prediction. + const Vector2f n_uv = GetTexCoordForEntryId(next_data_id, data); + const Vector2f p_uv = GetTexCoordForEntryId(prev_data_id, data); + if (p_uv == n_uv) { + // We cannot do a reliable prediction on degenerated UV triangles. + predicted_value_[0] = static_cast(p_uv[0]); + predicted_value_[1] = static_cast(p_uv[1]); + return; + } + + // Get positions at all corners. + const Vector3f tip_pos = GetPositionForEntryId(data_id); + const Vector3f next_pos = GetPositionForEntryId(next_data_id); + const Vector3f prev_pos = GetPositionForEntryId(prev_data_id); + // Use the positions of the above triangle to predict the texture coordinate + // on the tip corner C. + // Convert the triangle into a new coordinate system defined by orthogonal + // bases vectors S, T, where S is vector prev_pos - next_pos and T is an + // perpendicular vector to S in the same plane as vector the + // tip_pos - next_pos. + // The transformed triangle in the new coordinate system is then going to + // be represented as: + // + // 1 ^ + // | + // | + // | C + // | / \ + // | / \ + // |/ \ + // N--------------P + // 0 1 + // + // Where next_pos point (N) is at position (0, 0), prev_pos point (P) is + // at (1, 0). Our goal is to compute the position of the tip_pos point (C) + // in this new coordinate space (s, t). + // + const Vector3f pn = prev_pos - next_pos; + const Vector3f cn = tip_pos - next_pos; + const float pn_norm2_squared = pn.SquaredNorm(); + // Coordinate s of the tip corner C is simply the dot product of the + // normalized vectors |pn| and |cn| (normalized by the length of |pn|). 
+ // Since both of these vectors are normalized, we don't need to perform the + // normalization explicitly and instead we can just use the squared norm + // of |pn| as a denominator of the resulting dot product of non normalized + // vectors. + float s, t; + // |pn_norm2_squared| can be exactly 0 when the next_pos and prev_pos are + // the same positions (e.g. because they were quantized to the same + // location). + if (version_ < DRACO_BITSTREAM_VERSION(1, 2) || pn_norm2_squared > 0) { + s = pn.Dot(cn) / pn_norm2_squared; + // To get the coordinate t, we can use formula: + // t = |C-N - (P-N) * s| / |P-N| + // Do not use std::sqrt to avoid changes in the bitstream. + t = sqrt((cn - pn * s).SquaredNorm() / pn_norm2_squared); + } else { + s = 0; + t = 0; + } + + // Now we need to transform the point (s, t) to the texture coordinate space + // UV. We know the UV coordinates on points N and P (N_UV and P_UV). Lets + // denote P_UV - N_UV = PN_UV. PN_UV is then 2 dimensional vector that can + // be used to define transformation from the normalized coordinate system + // to the texture coordinate system using a 3x3 affine matrix M: + // + // M = | PN_UV[0] -PN_UV[1] N_UV[0] | + // | PN_UV[1] PN_UV[0] N_UV[1] | + // | 0 0 1 | + // + // The predicted point C_UV in the texture space is then equal to + // C_UV = M * (s, t, 1). Because the triangle in UV space may be flipped + // around the PN_UV axis, we also need to consider point C_UV' = M * (s, -t) + // as the prediction. + const Vector2f pn_uv = p_uv - n_uv; + const float pnus = pn_uv[0] * s + n_uv[0]; + const float pnut = pn_uv[0] * t; + const float pnvs = pn_uv[1] * s + n_uv[1]; + const float pnvt = pn_uv[1] * t; + Vector2f predicted_uv; + + // When decoding the data, we already know which orientation to use. + const bool orientation = orientations_.back(); + orientations_.pop_back(); + if (orientation) + predicted_uv = Vector2f(pnus - pnvt, pnvs + pnut); + else + predicted_uv = Vector2f(pnus + pnvt, pnvs - pnut); + + if (std::is_integral::value) { + // Round the predicted value for integer types. + if (std::isnan(predicted_uv[0])) { + predicted_value_[0] = INT_MIN; + } else { + predicted_value_[0] = static_cast(floor(predicted_uv[0] + 0.5)); + } + if (std::isnan(predicted_uv[1])) { + predicted_value_[1] = INT_MIN; + } else { + predicted_value_[1] = static_cast(floor(predicted_uv[1] + 0.5)); + } + } else { + predicted_value_[0] = static_cast(predicted_uv[0]); + predicted_value_[1] = static_cast(predicted_uv[1]); + } + return; + } + // Else we don't have available textures on both corners. For such case we + // can't use positions for predicting the uv value and we resort to delta + // coding. + int data_offset = 0; + if (prev_data_id < data_id) { + // Use the value on the previous corner as the prediction. + data_offset = prev_data_id * num_components_; + } + if (next_data_id < data_id) { + // Use the value on the next corner as the prediction. + data_offset = next_data_id * num_components_; + } else { + // None of the other corners have a valid value. Use the last encoded value + // as the prediction if possible. + if (data_id > 0) { + data_offset = (data_id - 1) * num_components_; + } else { + // We are encoding the first value. Predict 0. 
+ for (int i = 0; i < num_components_; ++i) { + predicted_value_[i] = 0; + } + return; + } + } + for (int i = 0; i < num_components_; ++i) { + predicted_value_[i] = data[data_offset + i]; + } +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_DECODER_H_ +#endif diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_encoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_encoder.h new file mode 100644 index 000000000..813b72ae3 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_encoder.h @@ -0,0 +1,318 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_ENCODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_ENCODER_H_ + +#include + +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h" +#include "draco/compression/bit_coders/rans_bit_encoder.h" +#include "draco/core/varint_encoding.h" +#include "draco/core/vector_d.h" +#include "draco/mesh/corner_table.h" + +namespace draco { + +// Prediction scheme designed for predicting texture coordinates from known +// spatial position of vertices. For good parametrization, the ratios between +// triangle edge lengths should be about the same in both the spatial and UV +// coordinate spaces, which makes the positions a good predictor for the UV +// coordinates. 
+template +class MeshPredictionSchemeTexCoordsEncoder + : public MeshPredictionSchemeEncoder { + public: + using CorrType = typename MeshPredictionSchemeEncoder::CorrType; + MeshPredictionSchemeTexCoordsEncoder(const PointAttribute *attribute, + const TransformT &transform, + const MeshDataT &mesh_data) + : MeshPredictionSchemeEncoder( + attribute, transform, mesh_data), + pos_attribute_(nullptr), + entry_to_point_id_map_(nullptr), + num_components_(0) {} + + bool ComputeCorrectionValues( + const DataTypeT *in_data, CorrType *out_corr, int size, + int num_components, const PointIndex *entry_to_point_id_map) override; + + bool EncodePredictionData(EncoderBuffer *buffer) override; + + PredictionSchemeMethod GetPredictionMethod() const override { + return MESH_PREDICTION_TEX_COORDS_DEPRECATED; + } + + bool IsInitialized() const override { + if (pos_attribute_ == nullptr) { + return false; + } + if (!this->mesh_data().IsInitialized()) { + return false; + } + return true; + } + + int GetNumParentAttributes() const override { return 1; } + + GeometryAttribute::Type GetParentAttributeType(int i) const override { + DRACO_DCHECK_EQ(i, 0); + (void)i; + return GeometryAttribute::POSITION; + } + + bool SetParentAttribute(const PointAttribute *att) override { + if (att->attribute_type() != GeometryAttribute::POSITION) { + return false; // Invalid attribute type. + } + if (att->num_components() != 3) { + return false; // Currently works only for 3 component positions. + } + pos_attribute_ = att; + return true; + } + + protected: + Vector3f GetPositionForEntryId(int entry_id) const { + const PointIndex point_id = entry_to_point_id_map_[entry_id]; + Vector3f pos; + pos_attribute_->ConvertValue(pos_attribute_->mapped_index(point_id), + &pos[0]); + return pos; + } + + Vector2f GetTexCoordForEntryId(int entry_id, const DataTypeT *data) const { + const int data_offset = entry_id * num_components_; + return Vector2f(static_cast(data[data_offset]), + static_cast(data[data_offset + 1])); + } + + void ComputePredictedValue(CornerIndex corner_id, const DataTypeT *data, + int data_id); + + private: + const PointAttribute *pos_attribute_; + const PointIndex *entry_to_point_id_map_; + std::unique_ptr predicted_value_; + int num_components_; + // Encoded / decoded array of UV flips. + std::vector orientations_; +}; + +template +bool MeshPredictionSchemeTexCoordsEncoder:: + ComputeCorrectionValues(const DataTypeT *in_data, CorrType *out_corr, + int size, int num_components, + const PointIndex *entry_to_point_id_map) { + num_components_ = num_components; + entry_to_point_id_map_ = entry_to_point_id_map; + predicted_value_ = + std::unique_ptr(new DataTypeT[num_components]); + this->transform().Init(in_data, size, num_components); + // We start processing from the end because this prediction uses data from + // previous entries that could be overwritten when an entry is processed. + for (int p = + static_cast(this->mesh_data().data_to_corner_map()->size()) - 1; + p >= 0; --p) { + const CornerIndex corner_id = this->mesh_data().data_to_corner_map()->at(p); + ComputePredictedValue(corner_id, in_data, p); + + const int dst_offset = p * num_components; + this->transform().ComputeCorrection( + in_data + dst_offset, predicted_value_.get(), out_corr + dst_offset); + } + return true; +} + +template +bool MeshPredictionSchemeTexCoordsEncoder:: + EncodePredictionData(EncoderBuffer *buffer) { + // Encode the delta-coded orientations using arithmetic coding. 
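  // Editorial note (not part of the upstream source): each encoded bit below
  // is the comparison of an orientation with its predecessor, starting from an
  // implicit "true". For example, the orientation sequence
  //   true, true, false, false, true
  // is delta-coded as the bits
  //   1, 1, 0, 1, 0
  // because only the third and fifth entries differ from the entry before
  // them. Long runs of identical orientations therefore become long runs of
  // 1 bits, which the rANS bit encoder compresses well.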
+ const uint32_t num_orientations = static_cast(orientations_.size()); + EncodeVarint(num_orientations, buffer); + bool last_orientation = true; + RAnsBitEncoder encoder; + encoder.StartEncoding(); + for (bool orientation : orientations_) { + encoder.EncodeBit(orientation == last_orientation); + last_orientation = orientation; + } + encoder.EndEncoding(buffer); + return MeshPredictionSchemeEncoder::EncodePredictionData(buffer); +} + +template +void MeshPredictionSchemeTexCoordsEncoder:: + ComputePredictedValue(CornerIndex corner_id, const DataTypeT *data, + int data_id) { + // Compute the predicted UV coordinate from the positions on all corners + // of the processed triangle. For the best prediction, the UV coordinates + // on the next/previous corners need to be already encoded/decoded. + const CornerIndex next_corner_id = + this->mesh_data().corner_table()->Next(corner_id); + const CornerIndex prev_corner_id = + this->mesh_data().corner_table()->Previous(corner_id); + // Get the encoded data ids from the next and previous corners. + // The data id is the encoding order of the UV coordinates. + int next_data_id, prev_data_id; + + int next_vert_id, prev_vert_id; + next_vert_id = + this->mesh_data().corner_table()->Vertex(next_corner_id).value(); + prev_vert_id = + this->mesh_data().corner_table()->Vertex(prev_corner_id).value(); + + next_data_id = this->mesh_data().vertex_to_data_map()->at(next_vert_id); + prev_data_id = this->mesh_data().vertex_to_data_map()->at(prev_vert_id); + + if (prev_data_id < data_id && next_data_id < data_id) { + // Both other corners have available UV coordinates for prediction. + const Vector2f n_uv = GetTexCoordForEntryId(next_data_id, data); + const Vector2f p_uv = GetTexCoordForEntryId(prev_data_id, data); + if (p_uv == n_uv) { + // We cannot do a reliable prediction on degenerated UV triangles. + predicted_value_[0] = static_cast(p_uv[0]); + predicted_value_[1] = static_cast(p_uv[1]); + return; + } + + // Get positions at all corners. + const Vector3f tip_pos = GetPositionForEntryId(data_id); + const Vector3f next_pos = GetPositionForEntryId(next_data_id); + const Vector3f prev_pos = GetPositionForEntryId(prev_data_id); + // Use the positions of the above triangle to predict the texture coordinate + // on the tip corner C. + // Convert the triangle into a new coordinate system defined by orthogonal + // bases vectors S, T, where S is vector prev_pos - next_pos and T is an + // perpendicular vector to S in the same plane as vector the + // tip_pos - next_pos. + // The transformed triangle in the new coordinate system is then going to + // be represented as: + // + // 1 ^ + // | + // | + // | C + // | / \ + // | / \ + // |/ \ + // N--------------P + // 0 1 + // + // Where next_pos point (N) is at position (0, 0), prev_pos point (P) is + // at (1, 0). Our goal is to compute the position of the tip_pos point (C) + // in this new coordinate space (s, t). + // + const Vector3f pn = prev_pos - next_pos; + const Vector3f cn = tip_pos - next_pos; + const float pn_norm2_squared = pn.SquaredNorm(); + // Coordinate s of the tip corner C is simply the dot product of the + // normalized vectors |pn| and |cn| (normalized by the length of |pn|). + // Since both of these vectors are normalized, we don't need to perform the + // normalization explicitly and instead we can just use the squared norm + // of |pn| as a denominator of the resulting dot product of non normalized + // vectors. 
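  // Editorial worked example (illustrative numbers, not part of the upstream
  // source): with next_pos N = (0, 0, 0), prev_pos P = (2, 0, 0) and
  // tip_pos C = (1, 1, 0) we get pn = (2, 0, 0), cn = (1, 1, 0) and
  // pn_norm2_squared = 4, so
  //   s = pn.Dot(cn) / 4 = 2 / 4 = 0.5
  //   t = sqrt(|cn - pn * 0.5|^2 / 4) = sqrt(1 / 4) = 0.5,
  // i.e. C projects onto the midpoint of NP and sits half an edge length
  // above it.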
+ float s, t; + // |pn_norm2_squared| can be exactly 0 when the next_pos and prev_pos are + // the same positions (e.g. because they were quantized to the same + // location). + if (pn_norm2_squared > 0) { + s = pn.Dot(cn) / pn_norm2_squared; + // To get the coordinate t, we can use formula: + // t = |C-N - (P-N) * s| / |P-N| + // Do not use std::sqrt to avoid changes in the bitstream. + t = sqrt((cn - pn * s).SquaredNorm() / pn_norm2_squared); + } else { + s = 0; + t = 0; + } + + // Now we need to transform the point (s, t) to the texture coordinate space + // UV. We know the UV coordinates on points N and P (N_UV and P_UV). Lets + // denote P_UV - N_UV = PN_UV. PN_UV is then 2 dimensional vector that can + // be used to define transformation from the normalized coordinate system + // to the texture coordinate system using a 3x3 affine matrix M: + // + // M = | PN_UV[0] -PN_UV[1] N_UV[0] | + // | PN_UV[1] PN_UV[0] N_UV[1] | + // | 0 0 1 | + // + // The predicted point C_UV in the texture space is then equal to + // C_UV = M * (s, t, 1). Because the triangle in UV space may be flipped + // around the PN_UV axis, we also need to consider point C_UV' = M * (s, -t) + // as the prediction. + const Vector2f pn_uv = p_uv - n_uv; + const float pnus = pn_uv[0] * s + n_uv[0]; + const float pnut = pn_uv[0] * t; + const float pnvs = pn_uv[1] * s + n_uv[1]; + const float pnvt = pn_uv[1] * t; + Vector2f predicted_uv; + + // When encoding compute both possible vectors and determine which one + // results in a better prediction. + const Vector2f predicted_uv_0(pnus - pnvt, pnvs + pnut); + const Vector2f predicted_uv_1(pnus + pnvt, pnvs - pnut); + const Vector2f c_uv = GetTexCoordForEntryId(data_id, data); + if ((c_uv - predicted_uv_0).SquaredNorm() < + (c_uv - predicted_uv_1).SquaredNorm()) { + predicted_uv = predicted_uv_0; + orientations_.push_back(true); + } else { + predicted_uv = predicted_uv_1; + orientations_.push_back(false); + } + if (std::is_integral::value) { + // Round the predicted value for integer types. + predicted_value_[0] = static_cast(floor(predicted_uv[0] + 0.5)); + predicted_value_[1] = static_cast(floor(predicted_uv[1] + 0.5)); + } else { + predicted_value_[0] = static_cast(predicted_uv[0]); + predicted_value_[1] = static_cast(predicted_uv[1]); + } + return; + } + // Else we don't have available textures on both corners. For such case we + // can't use positions for predicting the uv value and we resort to delta + // coding. + int data_offset = 0; + if (prev_data_id < data_id) { + // Use the value on the previous corner as the prediction. + data_offset = prev_data_id * num_components_; + } + if (next_data_id < data_id) { + // Use the value on the next corner as the prediction. + data_offset = next_data_id * num_components_; + } else { + // None of the other corners have a valid value. Use the last encoded value + // as the prediction if possible. + if (data_id > 0) { + data_offset = (data_id - 1) * num_components_; + } else { + // We are encoding the first value. Predict 0. 
+ for (int i = 0; i < num_components_; ++i) { + predicted_value_[i] = 0; + } + return; + } + } + for (int i = 0; i < num_components_; ++i) { + predicted_value_[i] = data[data_offset + i]; + } +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_decoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_decoder.h new file mode 100644 index 000000000..83d496639 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_decoder.h @@ -0,0 +1,143 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_DECODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_DECODER_H_ + +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h" +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_predictor.h" +#include "draco/compression/bit_coders/rans_bit_decoder.h" + +namespace draco { + +// Decoder for predictions of UV coordinates encoded by our specialized and +// portable texture coordinate predictor. See the corresponding encoder for more +// details. +template +class MeshPredictionSchemeTexCoordsPortableDecoder + : public MeshPredictionSchemeDecoder { + public: + using CorrType = typename MeshPredictionSchemeDecoder::CorrType; + MeshPredictionSchemeTexCoordsPortableDecoder(const PointAttribute *attribute, + const TransformT &transform, + const MeshDataT &mesh_data) + : MeshPredictionSchemeDecoder( + attribute, transform, mesh_data), + predictor_(mesh_data) {} + + bool ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data, + int size, int num_components, + const PointIndex *entry_to_point_id_map) override; + + bool DecodePredictionData(DecoderBuffer *buffer) override; + + PredictionSchemeMethod GetPredictionMethod() const override { + return MESH_PREDICTION_TEX_COORDS_PORTABLE; + } + + bool IsInitialized() const override { + if (!predictor_.IsInitialized()) { + return false; + } + if (!this->mesh_data().IsInitialized()) { + return false; + } + return true; + } + + int GetNumParentAttributes() const override { return 1; } + + GeometryAttribute::Type GetParentAttributeType(int i) const override { + DRACO_DCHECK_EQ(i, 0); + (void)i; + return GeometryAttribute::POSITION; + } + + bool SetParentAttribute(const PointAttribute *att) override { + if (!att || att->attribute_type() != GeometryAttribute::POSITION) { + return false; // Invalid attribute type. + } + if (att->num_components() != 3) { + return false; // Currently works only for 3 component positions. 
+ } + predictor_.SetPositionAttribute(*att); + return true; + } + + private: + MeshPredictionSchemeTexCoordsPortablePredictor + predictor_; +}; + +template +bool MeshPredictionSchemeTexCoordsPortableDecoder< + DataTypeT, TransformT, + MeshDataT>::ComputeOriginalValues(const CorrType *in_corr, + DataTypeT *out_data, int /* size */, + int num_components, + const PointIndex *entry_to_point_id_map) { + if (num_components != MeshPredictionSchemeTexCoordsPortablePredictor< + DataTypeT, MeshDataT>::kNumComponents) { + return false; + } + predictor_.SetEntryToPointIdMap(entry_to_point_id_map); + this->transform().Init(num_components); + + const int corner_map_size = + static_cast(this->mesh_data().data_to_corner_map()->size()); + for (int p = 0; p < corner_map_size; ++p) { + const CornerIndex corner_id = this->mesh_data().data_to_corner_map()->at(p); + if (!predictor_.template ComputePredictedValue(corner_id, out_data, + p)) { + return false; + } + + const int dst_offset = p * num_components; + this->transform().ComputeOriginalValue(predictor_.predicted_value(), + in_corr + dst_offset, + out_data + dst_offset); + } + return true; +} + +template +bool MeshPredictionSchemeTexCoordsPortableDecoder< + DataTypeT, TransformT, MeshDataT>::DecodePredictionData(DecoderBuffer + *buffer) { + // Decode the delta coded orientations. + int32_t num_orientations = 0; + if (!buffer->Decode(&num_orientations) || num_orientations < 0) { + return false; + } + predictor_.ResizeOrientations(num_orientations); + bool last_orientation = true; + RAnsBitDecoder decoder; + if (!decoder.StartDecoding(buffer)) { + return false; + } + for (int i = 0; i < num_orientations; ++i) { + if (!decoder.DecodeNextBit()) { + last_orientation = !last_orientation; + } + predictor_.set_orientation(i, last_orientation); + } + decoder.EndDecoding(); + return MeshPredictionSchemeDecoder::DecodePredictionData(buffer); +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_encoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_encoder.h new file mode 100644 index 000000000..741ec66dc --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_encoder.h @@ -0,0 +1,133 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_ENCODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_ENCODER_H_ + +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h" +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_predictor.h" +#include "draco/compression/bit_coders/rans_bit_encoder.h" + +namespace draco { + +// Prediction scheme designed for predicting texture coordinates from known +// spatial position of vertices. For isometric parametrizations, the ratios +// between triangle edge lengths should be about the same in both the spatial +// and UV coordinate spaces, which makes the positions a good predictor for the +// UV coordinates. Note that this may not be the optimal approach for other +// parametrizations such as projective ones. +template +class MeshPredictionSchemeTexCoordsPortableEncoder + : public MeshPredictionSchemeEncoder { + public: + using CorrType = typename MeshPredictionSchemeEncoder::CorrType; + MeshPredictionSchemeTexCoordsPortableEncoder(const PointAttribute *attribute, + const TransformT &transform, + const MeshDataT &mesh_data) + : MeshPredictionSchemeEncoder( + attribute, transform, mesh_data), + predictor_(mesh_data) {} + + bool ComputeCorrectionValues( + const DataTypeT *in_data, CorrType *out_corr, int size, + int num_components, const PointIndex *entry_to_point_id_map) override; + + bool EncodePredictionData(EncoderBuffer *buffer) override; + + PredictionSchemeMethod GetPredictionMethod() const override { + return MESH_PREDICTION_TEX_COORDS_PORTABLE; + } + + bool IsInitialized() const override { + if (!predictor_.IsInitialized()) { + return false; + } + if (!this->mesh_data().IsInitialized()) { + return false; + } + return true; + } + + int GetNumParentAttributes() const override { return 1; } + + GeometryAttribute::Type GetParentAttributeType(int i) const override { + DRACO_DCHECK_EQ(i, 0); + (void)i; + return GeometryAttribute::POSITION; + } + + bool SetParentAttribute(const PointAttribute *att) override { + if (att->attribute_type() != GeometryAttribute::POSITION) { + return false; // Invalid attribute type. + } + if (att->num_components() != 3) { + return false; // Currently works only for 3 component positions. + } + predictor_.SetPositionAttribute(*att); + return true; + } + + private: + MeshPredictionSchemeTexCoordsPortablePredictor + predictor_; +}; + +template +bool MeshPredictionSchemeTexCoordsPortableEncoder:: + ComputeCorrectionValues(const DataTypeT *in_data, CorrType *out_corr, + int size, int num_components, + const PointIndex *entry_to_point_id_map) { + predictor_.SetEntryToPointIdMap(entry_to_point_id_map); + this->transform().Init(in_data, size, num_components); + // We start processing from the end because this prediction uses data from + // previous entries that could be overwritten when an entry is processed. 
+ for (int p = + static_cast(this->mesh_data().data_to_corner_map()->size() - 1); + p >= 0; --p) { + const CornerIndex corner_id = this->mesh_data().data_to_corner_map()->at(p); + predictor_.template ComputePredictedValue(corner_id, in_data, p); + + const int dst_offset = p * num_components; + this->transform().ComputeCorrection(in_data + dst_offset, + predictor_.predicted_value(), + out_corr + dst_offset); + } + return true; +} + +template +bool MeshPredictionSchemeTexCoordsPortableEncoder< + DataTypeT, TransformT, MeshDataT>::EncodePredictionData(EncoderBuffer + *buffer) { + // Encode the delta-coded orientations using arithmetic coding. + const int32_t num_orientations = predictor_.num_orientations(); + buffer->Encode(num_orientations); + bool last_orientation = true; + RAnsBitEncoder encoder; + encoder.StartEncoding(); + for (int i = 0; i < num_orientations; ++i) { + const bool orientation = predictor_.orientation(i); + encoder.EncodeBit(orientation == last_orientation); + last_orientation = orientation; + } + encoder.EndEncoding(buffer); + return MeshPredictionSchemeEncoder::EncodePredictionData(buffer); +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_predictor.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_predictor.h new file mode 100644 index 000000000..f05e5ddd7 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_predictor.h @@ -0,0 +1,263 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_PREDICTOR_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_PREDICTOR_H_ + +#include + +#include "draco/attributes/point_attribute.h" +#include "draco/core/math_utils.h" +#include "draco/core/vector_d.h" +#include "draco/mesh/corner_table.h" + +namespace draco { + +// Predictor functionality used for portable UV prediction by both encoder and +// decoder. 
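To make the integer-only computation in the predictor below concrete, here is a small worked example with illustrative numbers (editorial, not part of the upstream source), following the scaled-space formulas used in ComputePredictedValue():

// Positions N = (0, 0, 0), P = (4, 0, 0), C = (2, 2, 0); UVs N_UV = (0, 0),
// P_UV = (10, 0).
//   pn = (4, 0, 0), pn_norm2_squared = 16, cn = (2, 2, 0), cn_dot_pn = 8,
//   pn_uv = (10, 0)
//   x_uv  = N_UV * 16 + 8 * pn_uv = (80, 0)
//   x_pos = N + (8 * pn) / 16 = (2, 0, 0), cx_norm2_squared = |C - x_pos|^2 = 4
//   cx_uv = (pn_uv[1], -pn_uv[0]) * IntSqrt(4 * 16) = (0, -10) * 8 = (0, -80)
// The two candidates are (x_uv + cx_uv) / 16 = (5, -5) and
// (x_uv - cx_uv) / 16 = (5, 5). If the real C_UV is (5, 5), the encoder keeps
// the second candidate and stores orientation = false; the decoder repeats the
// same arithmetic and uses the stored bit to pick the matching candidate.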
+template +class MeshPredictionSchemeTexCoordsPortablePredictor { + public: + static constexpr int kNumComponents = 2; + + explicit MeshPredictionSchemeTexCoordsPortablePredictor(const MeshDataT &md) + : pos_attribute_(nullptr), + entry_to_point_id_map_(nullptr), + mesh_data_(md) {} + void SetPositionAttribute(const PointAttribute &position_attribute) { + pos_attribute_ = &position_attribute; + } + void SetEntryToPointIdMap(const PointIndex *map) { + entry_to_point_id_map_ = map; + } + bool IsInitialized() const { return pos_attribute_ != nullptr; } + + VectorD GetPositionForEntryId(int entry_id) const { + const PointIndex point_id = entry_to_point_id_map_[entry_id]; + VectorD pos; + pos_attribute_->ConvertValue(pos_attribute_->mapped_index(point_id), + &pos[0]); + return pos; + } + + VectorD GetTexCoordForEntryId(int entry_id, + const DataTypeT *data) const { + const int data_offset = entry_id * kNumComponents; + return VectorD(data[data_offset], data[data_offset + 1]); + } + + // Computes predicted UV coordinates on a given corner. The coordinates are + // stored in |predicted_value_| member. + template + bool ComputePredictedValue(CornerIndex corner_id, const DataTypeT *data, + int data_id); + + const DataTypeT *predicted_value() const { return predicted_value_; } + bool orientation(int i) const { return orientations_[i]; } + void set_orientation(int i, bool v) { orientations_[i] = v; } + size_t num_orientations() const { return orientations_.size(); } + void ResizeOrientations(int num_orientations) { + orientations_.resize(num_orientations); + } + + private: + const PointAttribute *pos_attribute_; + const PointIndex *entry_to_point_id_map_; + DataTypeT predicted_value_[kNumComponents]; + // Encoded / decoded array of UV flips. + // TODO(ostava): We should remove this and replace this with in-place encoding + // and decoding to avoid unnecessary copy. + std::vector orientations_; + MeshDataT mesh_data_; +}; + +template +template +bool MeshPredictionSchemeTexCoordsPortablePredictor< + DataTypeT, MeshDataT>::ComputePredictedValue(CornerIndex corner_id, + const DataTypeT *data, + int data_id) { + // Compute the predicted UV coordinate from the positions on all corners + // of the processed triangle. For the best prediction, the UV coordinates + // on the next/previous corners need to be already encoded/decoded. + const CornerIndex next_corner_id = mesh_data_.corner_table()->Next(corner_id); + const CornerIndex prev_corner_id = + mesh_data_.corner_table()->Previous(corner_id); + // Get the encoded data ids from the next and previous corners. + // The data id is the encoding order of the UV coordinates. + int next_data_id, prev_data_id; + + int next_vert_id, prev_vert_id; + next_vert_id = mesh_data_.corner_table()->Vertex(next_corner_id).value(); + prev_vert_id = mesh_data_.corner_table()->Vertex(prev_corner_id).value(); + + next_data_id = mesh_data_.vertex_to_data_map()->at(next_vert_id); + prev_data_id = mesh_data_.vertex_to_data_map()->at(prev_vert_id); + + if (prev_data_id < data_id && next_data_id < data_id) { + // Both other corners have available UV coordinates for prediction. + const VectorD n_uv = GetTexCoordForEntryId(next_data_id, data); + const VectorD p_uv = GetTexCoordForEntryId(prev_data_id, data); + if (p_uv == n_uv) { + // We cannot do a reliable prediction on degenerated UV triangles. + predicted_value_[0] = p_uv[0]; + predicted_value_[1] = p_uv[1]; + return true; + } + + // Get positions at all corners. 
+ const VectorD tip_pos = GetPositionForEntryId(data_id); + const VectorD next_pos = GetPositionForEntryId(next_data_id); + const VectorD prev_pos = GetPositionForEntryId(prev_data_id); + // We use the positions of the above triangle to predict the texture + // coordinate on the tip corner C. + // To convert the triangle into the UV coordinate system we first compute + // position X on the vector |prev_pos - next_pos| that is the projection of + // point C onto vector |prev_pos - next_pos|: + // + // C + // /. \ + // / . \ + // / . \ + // N---X----------P + // + // Where next_pos is point (N), prev_pos is point (P) and tip_pos is the + // position of predicted coordinate (C). + // + const VectorD pn = prev_pos - next_pos; + const uint64_t pn_norm2_squared = pn.SquaredNorm(); + if (pn_norm2_squared != 0) { + // Compute the projection of C onto PN by computing dot product of CN with + // PN and normalizing it by length of PN. This gives us a factor |s| where + // |s = PN.Dot(CN) / PN.SquaredNorm2()|. This factor can be used to + // compute X in UV space |X_UV| as |X_UV = N_UV + s * PN_UV|. + const VectorD cn = tip_pos - next_pos; + const int64_t cn_dot_pn = pn.Dot(cn); + + const VectorD pn_uv = p_uv - n_uv; + // Because we perform all computations with integers, we don't explicitly + // compute the normalized factor |s|, but rather we perform all operations + // over UV vectors in a non-normalized coordinate system scaled with a + // scaling factor |pn_norm2_squared|: + // + // x_uv = X_UV * PN.Norm2Squared() + // + const VectorD x_uv = + n_uv * pn_norm2_squared + (cn_dot_pn * pn_uv); + + const int64_t pn_absmax_element = + std::max(std::max(std::abs(pn[0]), std::abs(pn[1])), std::abs(pn[2])); + if (cn_dot_pn > std::numeric_limits::max() / pn_absmax_element) { + // return false if squared length calculation would overflow. + return false; + } + + // Compute squared length of vector CX in position coordinate system: + const VectorD x_pos = + next_pos + (cn_dot_pn * pn) / pn_norm2_squared; + const uint64_t cx_norm2_squared = (tip_pos - x_pos).SquaredNorm(); + + // Compute vector CX_UV in the uv space by rotating vector PN_UV by 90 + // degrees and scaling it with factor CX.Norm2() / PN.Norm2(): + // + // CX_UV = (CX.Norm2() / PN.Norm2()) * Rot(PN_UV) + // + // To preserve precision, we perform all operations in scaled space as + // explained above, so we want the final vector to be: + // + // cx_uv = CX_UV * PN.Norm2Squared() + // + // We can then rewrite the formula as: + // + // cx_uv = CX.Norm2() * PN.Norm2() * Rot(PN_UV) + // + VectorD cx_uv(pn_uv[1], -pn_uv[0]); // Rotated PN_UV. + // Compute CX.Norm2() * PN.Norm2() + const uint64_t norm_squared = + IntSqrt(cx_norm2_squared * pn_norm2_squared); + // Final cx_uv in the scaled coordinate space. + cx_uv = cx_uv * norm_squared; + + // Predicted uv coordinate is then computed by either adding or + // subtracting CX_UV to/from X_UV. + VectorD predicted_uv; + if (is_encoder_t) { + // When encoding, compute both possible vectors and determine which one + // results in a better prediction. + // Both vectors need to be transformed back from the scaled space to + // the real UV coordinate space. 
+ const VectorD predicted_uv_0((x_uv + cx_uv) / + pn_norm2_squared); + const VectorD predicted_uv_1((x_uv - cx_uv) / + pn_norm2_squared); + const VectorD c_uv = GetTexCoordForEntryId(data_id, data); + if ((c_uv - predicted_uv_0).SquaredNorm() < + (c_uv - predicted_uv_1).SquaredNorm()) { + predicted_uv = predicted_uv_0; + orientations_.push_back(true); + } else { + predicted_uv = predicted_uv_1; + orientations_.push_back(false); + } + } else { + // When decoding the data, we already know which orientation to use. + if (orientations_.empty()) { + return false; + } + const bool orientation = orientations_.back(); + orientations_.pop_back(); + if (orientation) { + predicted_uv = (x_uv + cx_uv) / pn_norm2_squared; + } else { + predicted_uv = (x_uv - cx_uv) / pn_norm2_squared; + } + } + predicted_value_[0] = static_cast(predicted_uv[0]); + predicted_value_[1] = static_cast(predicted_uv[1]); + return true; + } + } + // Else we don't have available textures on both corners or the position data + // is invalid. For such cases we can't use positions for predicting the uv + // value and we resort to delta coding. + int data_offset = 0; + if (prev_data_id < data_id) { + // Use the value on the previous corner as the prediction. + data_offset = prev_data_id * kNumComponents; + } + if (next_data_id < data_id) { + // Use the value on the next corner as the prediction. + data_offset = next_data_id * kNumComponents; + } else { + // None of the other corners have a valid value. Use the last encoded value + // as the prediction if possible. + if (data_id > 0) { + data_offset = (data_id - 1) * kNumComponents; + } else { + // We are encoding the first value. Predict 0. + for (int i = 0; i < kNumComponents; ++i) { + predicted_value_[i] = 0; + } + return true; + } + } + for (int i = 0; i < kNumComponents; ++i) { + predicted_value_[i] = data[data_offset + i]; + } + return true; +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_PREDICTOR_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoder.h new file mode 100644 index 000000000..064e1b44f --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoder.h @@ -0,0 +1,90 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODER_H_ + +#include + +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_decoder_interface.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_decoding_transform.h" + +// Prediction schemes can be used during encoding and decoding of vertex +// attributes to predict attribute values based on the previously +// encoded/decoded data. The differences between the original and predicted +// attribute values are used to compute correction values that can be usually +// encoded with fewer bits compared to the original data. +namespace draco { + +// Abstract base class for typed prediction schemes. It provides basic access +// to the encoded attribute and to the supplied prediction transform. +template > +class PredictionSchemeDecoder : public PredictionSchemeTypedDecoderInterface< + DataTypeT, typename TransformT::CorrType> { + public: + typedef DataTypeT DataType; + typedef TransformT Transform; + // Correction type needs to be defined in the prediction transform class. + typedef typename Transform::CorrType CorrType; + explicit PredictionSchemeDecoder(const PointAttribute *attribute) + : PredictionSchemeDecoder(attribute, Transform()) {} + PredictionSchemeDecoder(const PointAttribute *attribute, + const Transform &transform) + : attribute_(attribute), transform_(transform) {} + + bool DecodePredictionData(DecoderBuffer *buffer) override { + if (!transform_.DecodeTransformData(buffer)) { + return false; + } + return true; + } + + const PointAttribute *GetAttribute() const override { return attribute(); } + + // Returns the number of parent attributes that are needed for the prediction. + int GetNumParentAttributes() const override { return 0; } + + // Returns the type of each of the parent attribute. + GeometryAttribute::Type GetParentAttributeType(int /* i */) const override { + return GeometryAttribute::INVALID; + } + + // Sets the required parent attribute. + bool SetParentAttribute(const PointAttribute * /* att */) override { + return false; + } + + bool AreCorrectionsPositive() override { + return transform_.AreCorrectionsPositive(); + } + + PredictionSchemeTransformType GetTransformType() const override { + return transform_.GetType(); + } + + protected: + inline const PointAttribute *attribute() const { return attribute_; } + inline const Transform &transform() const { return transform_; } + inline Transform &transform() { return transform_; } + + private: + const PointAttribute *attribute_; + Transform transform_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoder_factory.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoder_factory.h new file mode 100644 index 000000000..cf2a6ba6b --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoder_factory.h @@ -0,0 +1,194 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Functions for creating prediction schemes for decoders using the provided
+// prediction method id.
+
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODER_FACTORY_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODER_FACTORY_H_
+
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_decoder.h"
+#include "draco/draco_features.h"
+#ifdef DRACO_NORMAL_ENCODING_SUPPORTED
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_decoder.h"
+#endif
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_decoder.h"
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_decoder.h"
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_decoder.h"
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_decoder.h"
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_decoder.h"
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_delta_decoder.h"
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_factory.h"
+#include "draco/compression/mesh/mesh_decoder.h"
+
+namespace draco {
+
+// Factory class for creating mesh prediction schemes. The factory implements
+// operator() that is used to create an appropriate mesh prediction scheme in
+// the CreateMeshPredictionScheme() function in prediction_scheme_factory.h.
+template <typename DataTypeT>
+struct MeshPredictionSchemeDecoderFactory {
+  // Operator () specialized for the wrap transform. Wrap transform can be used
+  // for all mesh prediction schemes. The specialization is done at compile time
+  // to prevent instantiations of unneeded combinations of prediction schemes +
+  // prediction transforms.
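The compile-time dispatch pattern used here can be sketched in isolation as follows (editorial example; the enum and struct names are hypothetical and only illustrate the pattern, not the actual Draco types): a primary template covers transforms that admit every scheme, and a specialization restricts the choice, so invalid scheme/transform combinations are never instantiated.

#include <cstdio>

enum TransformKind { kWrap, kNormalOctahedron };

template <TransformKind kKind>
struct Dispatch {  // primary template: all prediction schemes are available
  void operator()() const { std::printf("any prediction scheme\n"); }
};

template <>
struct Dispatch<kNormalOctahedron> {  // restricted transform
  void operator()() const { std::printf("geometric normal scheme only\n"); }
};

int main() {
  Dispatch<kWrap>()();
  Dispatch<kNormalOctahedron>()();
  return 0;
}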
+ template + struct DispatchFunctor { + std::unique_ptr> operator()( + PredictionSchemeMethod method, const PointAttribute *attribute, + const TransformT &transform, const MeshDataT &mesh_data, + uint16_t bitstream_version) { + if (method == MESH_PREDICTION_PARALLELOGRAM) { + return std::unique_ptr>( + new MeshPredictionSchemeParallelogramDecoder( + attribute, transform, mesh_data)); + } +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED + else if (method == MESH_PREDICTION_MULTI_PARALLELOGRAM) { + return std::unique_ptr>( + new MeshPredictionSchemeMultiParallelogramDecoder< + DataTypeT, TransformT, MeshDataT>(attribute, transform, + mesh_data)); + } +#endif + else if (method == MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM) { + return std::unique_ptr>( + new MeshPredictionSchemeConstrainedMultiParallelogramDecoder< + DataTypeT, TransformT, MeshDataT>(attribute, transform, + mesh_data)); + } +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED + else if (method == MESH_PREDICTION_TEX_COORDS_DEPRECATED) { + return std::unique_ptr>( + new MeshPredictionSchemeTexCoordsDecoder( + attribute, transform, mesh_data, bitstream_version)); + } +#endif + else if (method == MESH_PREDICTION_TEX_COORDS_PORTABLE) { + return std::unique_ptr>( + new MeshPredictionSchemeTexCoordsPortableDecoder< + DataTypeT, TransformT, MeshDataT>(attribute, transform, + mesh_data)); + } +#ifdef DRACO_NORMAL_ENCODING_SUPPORTED + else if (method == MESH_PREDICTION_GEOMETRIC_NORMAL) { + return std::unique_ptr>( + new MeshPredictionSchemeGeometricNormalDecoder< + DataTypeT, TransformT, MeshDataT>(attribute, transform, + mesh_data)); + } +#endif + return nullptr; + } + }; + +#ifdef DRACO_NORMAL_ENCODING_SUPPORTED + // Operator () specialized for normal octahedron transforms. These transforms + // are currently used only by the geometric normal prediction scheme (the + // transform is also used by delta coding, but delta predictor is not + // constructed in this function). + template + struct DispatchFunctor { + std::unique_ptr> operator()( + PredictionSchemeMethod method, const PointAttribute *attribute, + const TransformT &transform, const MeshDataT &mesh_data, + uint16_t bitstream_version) { + if (method == MESH_PREDICTION_GEOMETRIC_NORMAL) { + return std::unique_ptr>( + new MeshPredictionSchemeGeometricNormalDecoder< + DataTypeT, TransformT, MeshDataT>(attribute, transform, + mesh_data)); + } + return nullptr; + } + }; + template + struct DispatchFunctor { + std::unique_ptr> operator()( + PredictionSchemeMethod method, const PointAttribute *attribute, + const TransformT &transform, const MeshDataT &mesh_data, + uint16_t bitstream_version) { + if (method == MESH_PREDICTION_GEOMETRIC_NORMAL) { + return std::unique_ptr>( + new MeshPredictionSchemeGeometricNormalDecoder< + DataTypeT, TransformT, MeshDataT>(attribute, transform, + mesh_data)); + } + return nullptr; + } + }; +#endif + + template + std::unique_ptr> operator()( + PredictionSchemeMethod method, const PointAttribute *attribute, + const TransformT &transform, const MeshDataT &mesh_data, + uint16_t bitstream_version) { + return DispatchFunctor()( + method, attribute, transform, mesh_data, bitstream_version); + } +}; + +// Creates a prediction scheme for a given decoder and given prediction method. +// The prediction schemes are automatically initialized with decoder specific +// data if needed. 
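A hedged usage sketch of this factory function (editorial; the surrounding decoder plumbing is assumed, and PredictionSchemeWrapDecodingTransform is taken from elsewhere in Draco rather than from this patch):

// att_id identifies the attribute being decoded; decoder points to the
// draco::PointCloudDecoder (or MeshDecoder) that owns the bitstream.
using WrapTransform =
    draco::PredictionSchemeWrapDecodingTransform<int32_t, int32_t>;
auto scheme = draco::CreatePredictionSchemeForDecoder<int32_t, WrapTransform>(
    draco::MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM, att_id, decoder);
if (scheme != nullptr && scheme->IsInitialized()) {
  // The attribute decoder would now call scheme->DecodePredictionData() and
  // scheme->ComputeOriginalValues() while reading the correction stream.
}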
+template +std::unique_ptr> +CreatePredictionSchemeForDecoder(PredictionSchemeMethod method, int att_id, + const PointCloudDecoder *decoder, + const TransformT &transform) { + if (method == PREDICTION_NONE) { + return nullptr; + } + const PointAttribute *const att = decoder->point_cloud()->attribute(att_id); + if (decoder->GetGeometryType() == TRIANGULAR_MESH) { + // Cast the decoder to mesh decoder. This is not necessarily safe if there + // is some other decoder decides to use TRIANGULAR_MESH as the return type, + // but unfortunately there is not nice work around for this without using + // RTTI (double dispatch and similar concepts will not work because of the + // template nature of the prediction schemes). + const MeshDecoder *const mesh_decoder = + static_cast(decoder); + + auto ret = CreateMeshPredictionScheme< + MeshDecoder, PredictionSchemeDecoder, + MeshPredictionSchemeDecoderFactory>( + mesh_decoder, method, att_id, transform, decoder->bitstream_version()); + if (ret) { + return ret; + } + // Otherwise try to create another prediction scheme. + } + // Create delta decoder. + return std::unique_ptr>( + new PredictionSchemeDeltaDecoder(att, transform)); +} + +// Create a prediction scheme using a default transform constructor. +template +std::unique_ptr> +CreatePredictionSchemeForDecoder(PredictionSchemeMethod method, int att_id, + const PointCloudDecoder *decoder) { + return CreatePredictionSchemeForDecoder( + method, att_id, decoder, TransformT()); +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODER_FACTORY_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoder_interface.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoder_interface.h new file mode 100644 index 000000000..6f19f7fdb --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoder_interface.h @@ -0,0 +1,53 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODER_INTERFACE_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODER_INTERFACE_H_ + +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_interface.h" +#include "draco/core/decoder_buffer.h" + +// Prediction schemes can be used during encoding and decoding of attributes +// to predict attribute values based on the previously encoded/decoded data. +// See prediction_scheme.h for more details. +namespace draco { + +// Abstract interface for all prediction schemes used during attribute encoding. +class PredictionSchemeDecoderInterface : public PredictionSchemeInterface { + public: + // Method that can be used to decode any prediction scheme specific data + // from the input buffer. 
+ virtual bool DecodePredictionData(DecoderBuffer *buffer) = 0; +}; + +// A specialized version of the prediction scheme interface for specific +// input and output data types. +// |entry_to_point_id_map| is the mapping between value entries to point ids +// of the associated point cloud, where one entry is defined as |num_components| +// values of the |in_data|. +// DataTypeT is the data type of input and predicted values. +// CorrTypeT is the data type used for storing corrected values. +template +class PredictionSchemeTypedDecoderInterface + : public PredictionSchemeDecoderInterface { + public: + // Reverts changes made by the prediction scheme during encoding. + virtual bool ComputeOriginalValues( + const CorrTypeT *in_corr, DataTypeT *out_data, int size, + int num_components, const PointIndex *entry_to_point_id_map) = 0; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODER_INTERFACE_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoding_transform.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoding_transform.h new file mode 100644 index 000000000..47c1532ad --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoding_transform.h @@ -0,0 +1,65 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODING_TRANSFORM_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODING_TRANSFORM_H_ + +#include "draco/compression/config/compression_shared.h" +#include "draco/core/decoder_buffer.h" + +namespace draco { + +// PredictionSchemeDecodingTransform is used to transform predicted values and +// correction values into the final original attribute values. +// DataTypeT is the data type of predicted values. +// CorrTypeT is the data type used for storing corrected values. It allows +// transforms to store corrections into a different type or format compared to +// the predicted data. +template +class PredictionSchemeDecodingTransform { + public: + typedef CorrTypeT CorrType; + PredictionSchemeDecodingTransform() : num_components_(0) {} + + void Init(int num_components) { num_components_ = num_components; } + + // Computes the original value from the input predicted value and the decoded + // corrections. The default implementation is equal to std:plus. + inline void ComputeOriginalValue(const DataTypeT *predicted_vals, + const CorrTypeT *corr_vals, + DataTypeT *out_original_vals) const { + static_assert(std::is_same::value, + "For the default prediction transform, correction and input " + "data must be of the same type."); + for (int i = 0; i < num_components_; ++i) { + out_original_vals[i] = predicted_vals[i] + corr_vals[i]; + } + } + + // Decodes any transform specific data. Called before Init() method. 
+ bool DecodeTransformData(DecoderBuffer * /* buffer */) { return true; } + + // Should return true if all corrected values are guaranteed to be positive. + bool AreCorrectionsPositive() const { return false; } + + protected: + int num_components() const { return num_components_; } + + private: + int num_components_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODING_TRANSFORM_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_delta_decoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_delta_decoder.h new file mode 100644 index 000000000..ae72c7120 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_delta_decoder.h @@ -0,0 +1,65 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DELTA_DECODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DELTA_DECODER_H_ + +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_decoder.h" + +namespace draco { + +// Decoder for values encoded with delta coding. See the corresponding encoder +// for more details. +template +class PredictionSchemeDeltaDecoder + : public PredictionSchemeDecoder { + public: + using CorrType = + typename PredictionSchemeDecoder::CorrType; + // Initialized the prediction scheme. + explicit PredictionSchemeDeltaDecoder(const PointAttribute *attribute) + : PredictionSchemeDecoder(attribute) {} + PredictionSchemeDeltaDecoder(const PointAttribute *attribute, + const TransformT &transform) + : PredictionSchemeDecoder(attribute, transform) {} + + bool ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data, + int size, int num_components, + const PointIndex *entry_to_point_id_map) override; + PredictionSchemeMethod GetPredictionMethod() const override { + return PREDICTION_DIFFERENCE; + } + bool IsInitialized() const override { return true; } +}; + +template +bool PredictionSchemeDeltaDecoder::ComputeOriginalValues( + const CorrType *in_corr, DataTypeT *out_data, int size, int num_components, + const PointIndex *) { + this->transform().Init(num_components); + // Decode the original value for the first element. + std::unique_ptr zero_vals(new DataTypeT[num_components]()); + this->transform().ComputeOriginalValue(zero_vals.get(), in_corr, out_data); + + // Decode data from the front using D(i) = D(i) + D(i - 1). 
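  // Editorial worked example (not part of the upstream source): for a one
  // component attribute, the corrections {5, 2, -1, 3} decode to the original
  // values {5, 7, 6, 9} with the default transform: the first value is
  // 0 + 5 (the zero prediction above), and each later value adds its
  // correction to the previously reconstructed value. The encoder produced
  // these corrections by walking the data backward and storing D(i) - D(i - 1).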
+ for (int i = num_components; i < size; i += num_components) { + this->transform().ComputeOriginalValue(out_data + i - num_components, + in_corr + i, out_data + i); + } + return true; +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DELTA_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_delta_encoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_delta_encoder.h new file mode 100644 index 000000000..324afafa6 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_delta_encoder.h @@ -0,0 +1,69 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DELTA_ENCODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DELTA_ENCODER_H_ + +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_encoder.h" + +namespace draco { + +// Basic prediction scheme based on computing backward differences between +// stored attribute values (also known as delta-coding). Usually works better +// than the reference point prediction scheme, because nearby values are often +// encoded next to each other. +template +class PredictionSchemeDeltaEncoder + : public PredictionSchemeEncoder { + public: + using CorrType = + typename PredictionSchemeEncoder::CorrType; + // Initialized the prediction scheme. + explicit PredictionSchemeDeltaEncoder(const PointAttribute *attribute) + : PredictionSchemeEncoder(attribute) {} + PredictionSchemeDeltaEncoder(const PointAttribute *attribute, + const TransformT &transform) + : PredictionSchemeEncoder(attribute, transform) {} + + bool ComputeCorrectionValues( + const DataTypeT *in_data, CorrType *out_corr, int size, + int num_components, const PointIndex *entry_to_point_id_map) override; + PredictionSchemeMethod GetPredictionMethod() const override { + return PREDICTION_DIFFERENCE; + } + bool IsInitialized() const override { return true; } +}; + +template +bool PredictionSchemeDeltaEncoder< + DataTypeT, TransformT>::ComputeCorrectionValues(const DataTypeT *in_data, + CorrType *out_corr, + int size, + int num_components, + const PointIndex *) { + this->transform().Init(in_data, size, num_components); + // Encode data from the back using D(i) = D(i) - D(i - 1). + for (int i = size - num_components; i > 0; i -= num_components) { + this->transform().ComputeCorrection( + in_data + i, in_data + i - num_components, out_corr + i); + } + // Encode correction for the first element. 
+ std::unique_ptr zero_vals(new DataTypeT[num_components]()); + this->transform().ComputeCorrection(in_data, zero_vals.get(), out_corr); + return true; +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DELTA_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder.h new file mode 100644 index 000000000..2a211a9fc --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder.h @@ -0,0 +1,90 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODER_H_ + +#include + +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_interface.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_encoding_transform.h" + +// Prediction schemes can be used during encoding and decoding of vertex +// attributes to predict attribute values based on the previously +// encoded/decoded data. The differences between the original and predicted +// attribute values are used to compute correction values that can be usually +// encoded with fewer bits compared to the original data. +namespace draco { + +// Abstract base class for typed prediction schemes. It provides basic access +// to the encoded attribute and to the supplied prediction transform. +template > +class PredictionSchemeEncoder : public PredictionSchemeTypedEncoderInterface< + DataTypeT, typename TransformT::CorrType> { + public: + typedef DataTypeT DataType; + typedef TransformT Transform; + // Correction type needs to be defined in the prediction transform class. + typedef typename Transform::CorrType CorrType; + explicit PredictionSchemeEncoder(const PointAttribute *attribute) + : PredictionSchemeEncoder(attribute, Transform()) {} + PredictionSchemeEncoder(const PointAttribute *attribute, + const Transform &transform) + : attribute_(attribute), transform_(transform) {} + + bool EncodePredictionData(EncoderBuffer *buffer) override { + if (!transform_.EncodeTransformData(buffer)) { + return false; + } + return true; + } + + const PointAttribute *GetAttribute() const override { return attribute(); } + + // Returns the number of parent attributes that are needed for the prediction. + int GetNumParentAttributes() const override { return 0; } + + // Returns the type of each of the parent attribute. + GeometryAttribute::Type GetParentAttributeType(int /* i */) const override { + return GeometryAttribute::INVALID; + } + + // Sets the required parent attribute. 
+ bool SetParentAttribute(const PointAttribute * /* att */) override { + return false; + } + + bool AreCorrectionsPositive() override { + return transform_.AreCorrectionsPositive(); + } + + PredictionSchemeTransformType GetTransformType() const override { + return transform_.GetType(); + } + + protected: + inline const PointAttribute *attribute() const { return attribute_; } + inline const Transform &transform() const { return transform_; } + inline Transform &transform() { return transform_; } + + private: + const PointAttribute *attribute_; + Transform transform_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.cc b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.cc new file mode 100644 index 000000000..f410a6cd2 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.cc @@ -0,0 +1,85 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h" + +namespace draco { + +PredictionSchemeMethod SelectPredictionMethod( + int att_id, const PointCloudEncoder *encoder) { + if (encoder->options()->GetSpeed() >= 10) { + // Selected fastest, though still doing some compression. + return PREDICTION_DIFFERENCE; + } + if (encoder->GetGeometryType() == TRIANGULAR_MESH) { + // Use speed setting to select the best encoding method. + const PointAttribute *const att = encoder->point_cloud()->attribute(att_id); + if (att->attribute_type() == GeometryAttribute::TEX_COORD) { + if (encoder->options()->GetSpeed() < 4) { + // Use texture coordinate prediction for speeds 0, 1, 2, 3. + return MESH_PREDICTION_TEX_COORDS_PORTABLE; + } + } + if (att->attribute_type() == GeometryAttribute::NORMAL) { +#ifdef DRACO_NORMAL_ENCODING_SUPPORTED + if (encoder->options()->GetSpeed() < 4) { + // Use geometric normal prediction for speeds 0, 1, 2, 3. + // For this prediction, the position attribute needs to be either + // integer or quantized as well. + const int pos_att_id = encoder->point_cloud()->GetNamedAttributeId( + GeometryAttribute::POSITION); + const PointAttribute *const pos_att = + encoder->point_cloud()->GetNamedAttribute( + GeometryAttribute::POSITION); + if (pos_att && (IsDataTypeIntegral(pos_att->data_type()) || + encoder->options()->GetAttributeInt( + pos_att_id, "quantization_bits", -1) > 0)) { + return MESH_PREDICTION_GEOMETRIC_NORMAL; + } + } +#endif + return PREDICTION_DIFFERENCE; // default + } + // Handle other attribute types. 
+ if (encoder->options()->GetSpeed() >= 8) { + return PREDICTION_DIFFERENCE; + } + if (encoder->options()->GetSpeed() >= 2 || + encoder->point_cloud()->num_points() < 40) { + // Parallelogram prediction is used for speeds 2 - 7 or when the overhead + // of using constrained multi-parallelogram would be too high. + return MESH_PREDICTION_PARALLELOGRAM; + } + // Multi-parallelogram is used for speeds 0, 1. + return MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM; + } + // Default option is delta coding. + return PREDICTION_DIFFERENCE; +} + +// Returns the preferred prediction scheme based on the encoder options. +PredictionSchemeMethod GetPredictionMethodFromOptions( + int att_id, const EncoderOptions &options) { + const int pred_type = + options.GetAttributeInt(att_id, "prediction_scheme", -1); + if (pred_type == -1) { + return PREDICTION_UNDEFINED; + } + if (pred_type < 0 || pred_type >= NUM_PREDICTION_SCHEMES) { + return PREDICTION_NONE; + } + return static_cast(pred_type); +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h new file mode 100644 index 000000000..40a7683aa --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h @@ -0,0 +1,129 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Functions for creating prediction schemes for encoders using the provided +// prediction method id. + +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODER_FACTORY_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODER_FACTORY_H_ + +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_encoder.h" +#ifdef DRACO_NORMAL_ENCODING_SUPPORTED +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_encoder.h" +#endif +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_encoder.h" +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_encoder.h" +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_encoder.h" +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_encoder.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_delta_encoder.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_encoder.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_factory.h" +#include "draco/compression/mesh/mesh_encoder.h" + +namespace draco { + +// Selects a prediction method based on the input geometry type and based on the +// encoder options. 
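// SelectPredictionMethod above is essentially a decision table keyed on the
// encoder speed setting. A condensed restatement of that table as a standalone
// sketch; the enum values mirror the constants used above, but SpeedToMeshMethod,
// AttrKind and the boolean parameter are illustrative, not part of the Draco API:
enum class Method {
  kDifference,                     // PREDICTION_DIFFERENCE
  kParallelogram,                  // MESH_PREDICTION_PARALLELOGRAM
  kConstrainedMultiParallelogram,  // MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM
  kTexCoordsPortable,              // MESH_PREDICTION_TEX_COORDS_PORTABLE
  kGeometricNormal                 // MESH_PREDICTION_GEOMETRIC_NORMAL
};
enum class AttrKind { kGeneric, kTexCoord, kNormal };

Method SpeedToMeshMethod(int speed, AttrKind kind, int num_points,
                         bool positions_integral_or_quantized) {
  if (speed >= 10) return Method::kDifference;  // Fastest option.
  if (kind == AttrKind::kTexCoord && speed < 4) {
    return Method::kTexCoordsPortable;
  }
  if (kind == AttrKind::kNormal) {
    return (speed < 4 && positions_integral_or_quantized)
               ? Method::kGeometricNormal
               : Method::kDifference;
  }
  if (speed >= 8) return Method::kDifference;
  if (speed >= 2 || num_points < 40) return Method::kParallelogram;
  return Method::kConstrainedMultiParallelogram;  // Speeds 0 and 1.
}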
+PredictionSchemeMethod SelectPredictionMethod(int att_id, + const PointCloudEncoder *encoder); + +// Factory class for creating mesh prediction schemes. +template +struct MeshPredictionSchemeEncoderFactory { + template + std::unique_ptr> operator()( + PredictionSchemeMethod method, const PointAttribute *attribute, + const TransformT &transform, const MeshDataT &mesh_data, + uint16_t bitstream_version) { + if (method == MESH_PREDICTION_PARALLELOGRAM) { + return std::unique_ptr>( + new MeshPredictionSchemeParallelogramEncoder( + attribute, transform, mesh_data)); + } else if (method == MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM) { + return std::unique_ptr>( + new MeshPredictionSchemeConstrainedMultiParallelogramEncoder< + DataTypeT, TransformT, MeshDataT>(attribute, transform, + mesh_data)); + } else if (method == MESH_PREDICTION_TEX_COORDS_PORTABLE) { + return std::unique_ptr>( + new MeshPredictionSchemeTexCoordsPortableEncoder< + DataTypeT, TransformT, MeshDataT>(attribute, transform, + mesh_data)); + } +#ifdef DRACO_NORMAL_ENCODING_SUPPORTED + else if (method == MESH_PREDICTION_GEOMETRIC_NORMAL) { + return std::unique_ptr>( + new MeshPredictionSchemeGeometricNormalEncoder( + attribute, transform, mesh_data)); + } +#endif + return nullptr; + } +}; + +// Creates a prediction scheme for a given encoder and given prediction method. +// The prediction schemes are automatically initialized with encoder specific +// data if needed. +template +std::unique_ptr> +CreatePredictionSchemeForEncoder(PredictionSchemeMethod method, int att_id, + const PointCloudEncoder *encoder, + const TransformT &transform) { + const PointAttribute *const att = encoder->point_cloud()->attribute(att_id); + if (method == PREDICTION_UNDEFINED) { + method = SelectPredictionMethod(att_id, encoder); + } + if (method == PREDICTION_NONE) { + return nullptr; // No prediction is used. + } + if (encoder->GetGeometryType() == TRIANGULAR_MESH) { + // Cast the encoder to mesh encoder. This is not necessarily safe if there + // is some other encoder decides to use TRIANGULAR_MESH as the return type, + // but unfortunately there is not nice work around for this without using + // RTTI (double dispatch and similar concepts will not work because of the + // template nature of the prediction schemes). + const MeshEncoder *const mesh_encoder = + static_cast(encoder); + auto ret = CreateMeshPredictionScheme< + MeshEncoder, PredictionSchemeEncoder, + MeshPredictionSchemeEncoderFactory>( + mesh_encoder, method, att_id, transform, kDracoMeshBitstreamVersion); + if (ret) { + return ret; + } + // Otherwise try to create another prediction scheme. + } + // Create delta encoder. + return std::unique_ptr>( + new PredictionSchemeDeltaEncoder(att, transform)); +} + +// Create a prediction scheme using a default transform constructor. +template +std::unique_ptr> +CreatePredictionSchemeForEncoder(PredictionSchemeMethod method, int att_id, + const PointCloudEncoder *encoder) { + return CreatePredictionSchemeForEncoder( + method, att_id, encoder, TransformT()); +} + +// Returns the preferred prediction scheme based on the encoder options. 
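// CreatePredictionSchemeForEncoder above tries the mesh-specific factory first
// and falls back to the generic delta scheme when no connectivity-aware scheme
// can be built. The shape of that fallback chain, sketched with hypothetical
// types that do not exist in Draco:
#include <memory>

struct SchemeBase { virtual ~SchemeBase() = default; };
struct ParallelogramScheme : SchemeBase {};
struct DeltaScheme : SchemeBase {};

enum class Kind { kParallelogram, kUnsupported };

std::unique_ptr<SchemeBase> MakeMeshScheme(Kind kind) {
  if (kind == Kind::kParallelogram) {
    return std::unique_ptr<SchemeBase>(new ParallelogramScheme());
  }
  return nullptr;  // No mesh-specific scheme for this method.
}

std::unique_ptr<SchemeBase> MakeScheme(Kind kind) {
  if (auto mesh_scheme = MakeMeshScheme(kind)) {
    return mesh_scheme;  // Preferred: connectivity-aware scheme.
  }
  return std::unique_ptr<SchemeBase>(new DeltaScheme());  // Generic fallback.
}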
+PredictionSchemeMethod GetPredictionMethodFromOptions( + int att_id, const EncoderOptions &options); + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODER_FACTORY_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_interface.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_interface.h new file mode 100644 index 000000000..37aa9f76a --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_interface.h @@ -0,0 +1,55 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODER_INTERFACE_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODER_INTERFACE_H_ + +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_interface.h" +#include "draco/core/encoder_buffer.h" + +// Prediction schemes can be used during encoding and decoding of attributes +// to predict attribute values based on the previously encoded/decoded data. +// See prediction_scheme.h for more details. +namespace draco { + +// Abstract interface for all prediction schemes used during attribute encoding. +class PredictionSchemeEncoderInterface : public PredictionSchemeInterface { + public: + // Method that can be used to encode any prediction scheme specific data + // into the output buffer. + virtual bool EncodePredictionData(EncoderBuffer *buffer) = 0; +}; + +// A specialized version of the prediction scheme interface for specific +// input and output data types. +// |entry_to_point_id_map| is the mapping between value entries to point ids +// of the associated point cloud, where one entry is defined as |num_components| +// values of the |in_data|. +// DataTypeT is the data type of input and predicted values. +// CorrTypeT is the data type used for storing corrected values. +template +class PredictionSchemeTypedEncoderInterface + : public PredictionSchemeEncoderInterface { + public: + // Applies the prediction scheme when encoding the attribute. + // |in_data| contains value entries to be encoded. + // |out_corr| is an output array containing the to be encoded corrections. 
+ virtual bool ComputeCorrectionValues( + const DataTypeT *in_data, CorrTypeT *out_corr, int size, + int num_components, const PointIndex *entry_to_point_id_map) = 0; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODER_INTERFACE_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoding_transform.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoding_transform.h new file mode 100644 index 000000000..0929492aa --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoding_transform.h @@ -0,0 +1,77 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODING_TRANSFORM_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODING_TRANSFORM_H_ + +#include "draco/compression/config/compression_shared.h" +#include "draco/core/encoder_buffer.h" + +namespace draco { + +// PredictionSchemeEncodingTransform is used to transform predicted values into +// correction values. +// CorrTypeT is the data type used for storing corrected values. It allows +// transforms to store corrections into a different type or format compared to +// the predicted data. +template +class PredictionSchemeEncodingTransform { + public: + typedef CorrTypeT CorrType; + PredictionSchemeEncodingTransform() : num_components_(0) {} + + PredictionSchemeTransformType GetType() const { + return PREDICTION_TRANSFORM_DELTA; + } + + // Performs any custom initialization of the transform for the encoder. + // |size| = total number of values in |orig_data| (i.e., number of entries * + // number of components). + void Init(const DataTypeT * /* orig_data */, int /* size */, + int num_components) { + num_components_ = num_components; + } + + // Computes the corrections based on the input original values and the + // predicted values. The correction is always computed for all components + // of the input element. |val_id| is the id of the input value + // (i.e., element_id * num_components). The default implementation is equal to + // std::minus. + inline void ComputeCorrection(const DataTypeT *original_vals, + const DataTypeT *predicted_vals, + CorrTypeT *out_corr_vals) { + static_assert(std::is_same::value, + "For the default prediction transform, correction and input " + "data must be of the same type."); + for (int i = 0; i < num_components_; ++i) { + out_corr_vals[i] = original_vals[i] - predicted_vals[i]; + } + } + + // Encode any transform specific data. + bool EncodeTransformData(EncoderBuffer * /* buffer */) { return true; } + + // Should return true if all corrected values are guaranteed to be positive. 
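// As the comment above notes, the default encoding transform's correction is
// just an element-wise std::minus over one entry's components. The same
// operation as a free-function sketch (names are illustrative only):
#include <algorithm>
#include <functional>

template <typename T>
void ComponentwiseCorrection(const T *original, const T *predicted,
                             int num_components, T *out_correction) {
  // out_correction[i] = original[i] - predicted[i] for each component.
  std::transform(original, original + num_components, predicted,
                 out_correction, std::minus<T>());
}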
+ bool AreCorrectionsPositive() const { return false; } + + protected: + int num_components() const { return num_components_; } + + private: + int num_components_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODING_TRANSFORM_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_factory.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_factory.h new file mode 100644 index 000000000..b36c4c8a2 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_factory.h @@ -0,0 +1,85 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Functions for creating prediction schemes from a provided prediction method +// name. The functions in this file can create only basic prediction schemes +// that don't require any encoder or decoder specific data. To create more +// sophisticated prediction schemes, use functions from either +// prediction_scheme_encoder_factory.h or, +// prediction_scheme_decoder_factory.h. + +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_FACTORY_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_FACTORY_H_ + +#include "draco/compression/attributes/mesh_attribute_indices_encoding_data.h" +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_data.h" +#include "draco/compression/config/compression_shared.h" +#include "draco/mesh/mesh_attribute_corner_table.h" + +namespace draco { + +template +std::unique_ptr CreateMeshPredictionScheme( + const EncodingDataSourceT *source, PredictionSchemeMethod method, + int att_id, const typename PredictionSchemeT::Transform &transform, + uint16_t bitstream_version) { + const PointAttribute *const att = source->point_cloud()->attribute(att_id); + if (source->GetGeometryType() == TRIANGULAR_MESH && + (method == MESH_PREDICTION_PARALLELOGRAM || + method == MESH_PREDICTION_MULTI_PARALLELOGRAM || + method == MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM || + method == MESH_PREDICTION_TEX_COORDS_PORTABLE || + method == MESH_PREDICTION_GEOMETRIC_NORMAL || + method == MESH_PREDICTION_TEX_COORDS_DEPRECATED)) { + const CornerTable *const ct = source->GetCornerTable(); + const MeshAttributeIndicesEncodingData *const encoding_data = + source->GetAttributeEncodingData(att_id); + if (ct == nullptr || encoding_data == nullptr) { + // No connectivity data found. + return nullptr; + } + // Connectivity data exists. 
+ const MeshAttributeCornerTable *const att_ct = + source->GetAttributeCornerTable(att_id); + if (att_ct != nullptr) { + typedef MeshPredictionSchemeData MeshData; + MeshData md; + md.Set(source->mesh(), att_ct, + &encoding_data->encoded_attribute_value_index_to_corner_map, + &encoding_data->vertex_to_encoded_attribute_value_index_map); + MeshPredictionSchemeFactoryT factory; + auto ret = factory(method, att, transform, md, bitstream_version); + if (ret) { + return ret; + } + } else { + typedef MeshPredictionSchemeData MeshData; + MeshData md; + md.Set(source->mesh(), ct, + &encoding_data->encoded_attribute_value_index_to_corner_map, + &encoding_data->vertex_to_encoded_attribute_value_index_map); + MeshPredictionSchemeFactoryT factory; + auto ret = factory(method, att, transform, md, bitstream_version); + if (ret) { + return ret; + } + } + } + return nullptr; +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_FACTORY_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_interface.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_interface.h new file mode 100644 index 000000000..c9b370693 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_interface.h @@ -0,0 +1,60 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_INTERFACE_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_INTERFACE_H_ + +#include "draco/attributes/point_attribute.h" +#include "draco/compression/config/compression_shared.h" + +// Prediction schemes can be used during encoding and decoding of attributes +// to predict attribute values based on the previously encoded/decoded data. +// See prediction_scheme.h for more details. +namespace draco { + +// Abstract interface for all prediction schemes used during attribute encoding. +class PredictionSchemeInterface { + public: + virtual ~PredictionSchemeInterface() = default; + virtual PredictionSchemeMethod GetPredictionMethod() const = 0; + + // Returns the encoded attribute. + virtual const PointAttribute *GetAttribute() const = 0; + + // Returns true when the prediction scheme is initialized with all data it + // needs. + virtual bool IsInitialized() const = 0; + + // Returns the number of parent attributes that are needed for the prediction. + virtual int GetNumParentAttributes() const = 0; + + // Returns the type of each of the parent attribute. + virtual GeometryAttribute::Type GetParentAttributeType(int i) const = 0; + + // Sets the required parent attribute. + // Returns false if the attribute doesn't meet the requirements of the + // prediction scheme. 
+ virtual bool SetParentAttribute(const PointAttribute *att) = 0; + + // Method should return true if the prediction scheme guarantees that all + // correction values are always positive (or at least non-negative). + virtual bool AreCorrectionsPositive() = 0; + + // Returns the transform type used by the prediction scheme. + virtual PredictionSchemeTransformType GetTransformType() const = 0; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_INTERFACE_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_decoding_transform.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_decoding_transform.h new file mode 100644 index 000000000..5a6c7c2dd --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_decoding_transform.h @@ -0,0 +1,118 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_CANONICALIZED_DECODING_TRANSFORM_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_CANONICALIZED_DECODING_TRANSFORM_H_ + +#include + +#include "draco/compression/attributes/normal_compression_utils.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_base.h" +#include "draco/core/decoder_buffer.h" +#include "draco/core/macros.h" +#include "draco/core/vector_d.h" + +namespace draco { + +// Class for converting correction values transformed by the canonicalized +// normal octahedron transform back to the original values. See the +// corresponding encoder for more details. +template +class PredictionSchemeNormalOctahedronCanonicalizedDecodingTransform + : public PredictionSchemeNormalOctahedronCanonicalizedTransformBase< + DataTypeT> { + public: + typedef VectorD Point2; + typedef DataTypeT CorrType; + typedef DataTypeT DataType; + + PredictionSchemeNormalOctahedronCanonicalizedDecodingTransform() {} + + // Dummy to fulfill concept. + void Init(int num_components) {} + + bool DecodeTransformData(DecoderBuffer *buffer) { + DataTypeT max_quantized_value, center_value; + if (!buffer->Decode(&max_quantized_value)) { + return false; + } + if (!buffer->Decode(¢er_value)) { + return false; + } + (void)center_value; + if (!this->set_max_quantized_value(max_quantized_value)) { + return false; + } + // Account for reading wrong values, e.g., due to fuzzing. 
+ if (this->quantization_bits() < 2) { + return false; + } + if (this->quantization_bits() > 30) { + return false; + } + return true; + } + + inline void ComputeOriginalValue(const DataType *pred_vals, + const CorrType *corr_vals, + DataType *out_orig_vals) const { + DRACO_DCHECK_LE(pred_vals[0], 2 * this->center_value()); + DRACO_DCHECK_LE(pred_vals[1], 2 * this->center_value()); + DRACO_DCHECK_LE(corr_vals[0], 2 * this->center_value()); + DRACO_DCHECK_LE(corr_vals[1], 2 * this->center_value()); + + DRACO_DCHECK_LE(0, pred_vals[0]); + DRACO_DCHECK_LE(0, pred_vals[1]); + DRACO_DCHECK_LE(0, corr_vals[0]); + DRACO_DCHECK_LE(0, corr_vals[1]); + + const Point2 pred = Point2(pred_vals[0], pred_vals[1]); + const Point2 corr = Point2(corr_vals[0], corr_vals[1]); + const Point2 orig = ComputeOriginalValue(pred, corr); + + out_orig_vals[0] = orig[0]; + out_orig_vals[1] = orig[1]; + } + + private: + Point2 ComputeOriginalValue(Point2 pred, Point2 corr) const { + const Point2 t(this->center_value(), this->center_value()); + pred = pred - t; + const bool pred_is_in_diamond = this->IsInDiamond(pred[0], pred[1]); + if (!pred_is_in_diamond) { + this->InvertDiamond(&pred[0], &pred[1]); + } + const bool pred_is_in_bottom_left = this->IsInBottomLeft(pred); + const int32_t rotation_count = this->GetRotationCount(pred); + if (!pred_is_in_bottom_left) { + pred = this->RotatePoint(pred, rotation_count); + } + Point2 orig = pred + corr; + orig[0] = this->ModMax(orig[0]); + orig[1] = this->ModMax(orig[1]); + if (!pred_is_in_bottom_left) { + const int32_t reverse_rotation_count = (4 - rotation_count) % 4; + orig = this->RotatePoint(orig, reverse_rotation_count); + } + if (!pred_is_in_diamond) { + this->InvertDiamond(&orig[0], &orig[1]); + } + orig = orig + t; + return orig; + } +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_CANONICALIZED_DECODING_TRANSFORM_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_encoding_transform.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_encoding_transform.h new file mode 100644 index 000000000..0dc96967b --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_encoding_transform.h @@ -0,0 +1,116 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
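// ComputeOriginalValue above recovers the value by adding the correction to
// the prediction and wrapping the result back into the signed range (ModMax),
// which undoes the MakePositive fold applied on the encoder side further
// below. A 1-D model of just that fold/unfold pair, ignoring the diamond and
// rotation steps; Fold/Unfold are illustrative names and the modulus is
// 2 * center + 1:
#include <cassert>

constexpr int kCenter = 7;  // Corresponds to 4 quantization bits.
constexpr int kModulus = 2 * kCenter + 1;

int Fold(int corr) { return corr < 0 ? corr + kModulus : corr; }
int Unfold(int x) { return x > kCenter ? x - kModulus : x; }

int main() {
  for (int orig = -kCenter; orig <= kCenter; ++orig) {
    for (int pred = -kCenter; pred <= kCenter; ++pred) {
      const int corr = Fold(orig - pred);   // Encoder: non-negative symbol.
      assert(Unfold(pred + corr) == orig);  // Decoder: exact round trip.
    }
  }
  return 0;
}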
+// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_CANONICALIZED_ENCODING_TRANSFORM_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_CANONICALIZED_ENCODING_TRANSFORM_H_ + +#include + +#include "draco/compression/attributes/normal_compression_utils.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_base.h" +#include "draco/core/encoder_buffer.h" +#include "draco/core/macros.h" +#include "draco/core/vector_d.h" + +namespace draco { + +// The transform works on octahedral coordinates for normals. The square is +// subdivided into four inner triangles (diamond) and four outer triangles. The +// inner triangles are associated with the upper part of the octahedron and the +// outer triangles are associated with the lower part. +// Given a prediction value P and the actual value Q that should be encoded, +// this transform first checks if P is outside the diamond. If so, the outer +// triangles are flipped towards the inside and vice versa. Then it checks if p +// is in the bottom left quadrant. If it is not, it rotates p and q accordingly. +// The actual correction value is then based on the mapped and rotated P and Q +// values. The inversion tends to result in shorter correction vectors and the +// rotation makes it so that all long correction values are positive, reducing +// the possible value range of the correction values and increasing the +// occurrences of positive large correction values, which helps the entropy +// encoder. This is possible since P is also known by the decoder, see also +// ComputeCorrection and ComputeOriginalValue functions. +// Note that the tile is not periodic, which implies that the outer edges can +// not be identified, which requires us to use an odd number of values on each +// axis. +// DataTypeT is expected to be some integral type. +// +template +class PredictionSchemeNormalOctahedronCanonicalizedEncodingTransform + : public PredictionSchemeNormalOctahedronCanonicalizedTransformBase< + DataTypeT> { + public: + typedef PredictionSchemeNormalOctahedronCanonicalizedTransformBase + Base; + typedef VectorD Point2; + typedef DataTypeT CorrType; + typedef DataTypeT DataType; + + // We expect the mod value to be of the form 2^b-1. + explicit PredictionSchemeNormalOctahedronCanonicalizedEncodingTransform( + DataType max_quantized_value) + : Base(max_quantized_value) {} + + // Dummy function to fulfill concept. 
+ void Init(const DataTypeT *orig_data, int size, int num_components) {} + + bool EncodeTransformData(EncoderBuffer *buffer) { + buffer->Encode(this->max_quantized_value()); + buffer->Encode(this->center_value()); + return true; + } + + inline void ComputeCorrection(const DataType *orig_vals, + const DataType *pred_vals, + CorrType *out_corr_vals) const { + DRACO_DCHECK_LE(pred_vals[0], this->center_value() * 2); + DRACO_DCHECK_LE(pred_vals[1], this->center_value() * 2); + DRACO_DCHECK_LE(orig_vals[0], this->center_value() * 2); + DRACO_DCHECK_LE(orig_vals[1], this->center_value() * 2); + DRACO_DCHECK_LE(0, pred_vals[0]); + DRACO_DCHECK_LE(0, pred_vals[1]); + DRACO_DCHECK_LE(0, orig_vals[0]); + DRACO_DCHECK_LE(0, orig_vals[1]); + + const Point2 orig = Point2(orig_vals[0], orig_vals[1]); + const Point2 pred = Point2(pred_vals[0], pred_vals[1]); + const Point2 corr = ComputeCorrection(orig, pred); + + out_corr_vals[0] = corr[0]; + out_corr_vals[1] = corr[1]; + } + + private: + Point2 ComputeCorrection(Point2 orig, Point2 pred) const { + const Point2 t(this->center_value(), this->center_value()); + orig = orig - t; + pred = pred - t; + if (!this->IsInDiamond(pred[0], pred[1])) { + this->InvertDiamond(&orig[0], &orig[1]); + this->InvertDiamond(&pred[0], &pred[1]); + } + if (!this->IsInBottomLeft(pred)) { + const int32_t rotation_count = this->GetRotationCount(pred); + orig = this->RotatePoint(orig, rotation_count); + pred = this->RotatePoint(pred, rotation_count); + } + Point2 corr = orig - pred; + corr[0] = this->MakePositive(corr[0]); + corr[1] = this->MakePositive(corr[1]); + return corr; + } +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_CANONICALIZED_ENCODING_TRANSFORM_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_base.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_base.h new file mode 100644 index 000000000..4a1e3a67b --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_base.h @@ -0,0 +1,102 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_CANONICALIZED_TRANSFORM_BASE_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_CANONICALIZED_TRANSFORM_BASE_H_ + +#include + +#include "draco/compression/attributes/normal_compression_utils.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_base.h" +#include "draco/compression/config/compression_shared.h" +#include "draco/core/bit_utils.h" +#include "draco/core/macros.h" +#include "draco/core/vector_d.h" + +namespace draco { + +// Base class containing shared functionality used by both encoding and decoding +// canonicalized normal octahedron prediction scheme transforms. See the +// encoding transform for more details about the method. +template +class PredictionSchemeNormalOctahedronCanonicalizedTransformBase + : public PredictionSchemeNormalOctahedronTransformBase { + public: + typedef PredictionSchemeNormalOctahedronTransformBase Base; + typedef VectorD Point2; + typedef DataTypeT DataType; + + PredictionSchemeNormalOctahedronCanonicalizedTransformBase() : Base() {} + // We expect the mod value to be of the form 2^b-1. + explicit PredictionSchemeNormalOctahedronCanonicalizedTransformBase( + DataType mod_value) + : Base(mod_value) {} + + static constexpr PredictionSchemeTransformType GetType() { + return PREDICTION_TRANSFORM_NORMAL_OCTAHEDRON_CANONICALIZED; + } + + int32_t GetRotationCount(Point2 pred) const { + const DataType sign_x = pred[0]; + const DataType sign_y = pred[1]; + + int32_t rotation_count = 0; + if (sign_x == 0) { + if (sign_y == 0) { + rotation_count = 0; + } else if (sign_y > 0) { + rotation_count = 3; + } else { + rotation_count = 1; + } + } else if (sign_x > 0) { + if (sign_y >= 0) { + rotation_count = 2; + } else { + rotation_count = 1; + } + } else { + if (sign_y <= 0) { + rotation_count = 0; + } else { + rotation_count = 3; + } + } + return rotation_count; + } + + Point2 RotatePoint(Point2 p, int32_t rotation_count) const { + switch (rotation_count) { + case 1: + return Point2(p[1], -p[0]); + case 2: + return Point2(-p[0], -p[1]); + case 3: + return Point2(-p[1], p[0]); + default: + return p; + } + } + + bool IsInBottomLeft(const Point2 &p) const { + if (p[0] == 0 && p[1] == 0) { + return true; + } + return (p[0] < 0 && p[1] <= 0); + } +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_CANONICALIZED_TRANSFORM_BASE_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_test.cc b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_test.cc new file mode 100644 index 000000000..8c8932f77 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_test.cc @@ -0,0 +1,192 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_encoding_transform.h" +#include "draco/core/draco_test_base.h" + +namespace { + +class PredictionSchemeNormalOctahedronCanonicalizedTransformTest + : public ::testing::Test { + protected: + typedef draco::PredictionSchemeNormalOctahedronCanonicalizedEncodingTransform< + int32_t> + Transform; + typedef Transform::Point2 Point2; + + void TestComputeCorrection(const Transform &transform, const int32_t &ox, + const int32_t &oy, const int32_t &px, + const int32_t &py, const int32_t &cx, + const int32_t &cy) { + const int32_t o[2] = {ox + 7, oy + 7}; + const int32_t p[2] = {px + 7, py + 7}; + int32_t corr[2] = {500, 500}; + transform.ComputeCorrection(o, p, corr); + ASSERT_EQ(corr[0], (cx + 15) % 15); + ASSERT_EQ(corr[1], (cy + 15) % 15); + } + + void TestGetRotationCount(const Transform &transform, const Point2 &pred, + const int32_t &rot_dir) { + const int32_t rotation_count = transform.GetRotationCount(pred); + ASSERT_EQ(rot_dir, rotation_count); + } + + void TestRotateRepresentation(const Transform &transform, const Point2 &org, + const Point2 &pred, const Point2 &rot_org, + const Point2 &rot_pred) { + const int32_t rotation_count = transform.GetRotationCount(pred); + const Point2 res_org = transform.RotatePoint(org, rotation_count); + const Point2 res_pred = transform.RotatePoint(pred, rotation_count); + ASSERT_EQ(rot_org[0], res_org[0]); + ASSERT_EQ(rot_org[1], res_org[1]); + ASSERT_EQ(rot_pred[0], res_pred[0]); + ASSERT_EQ(rot_pred[1], res_pred[1]); + } +}; + +TEST_F(PredictionSchemeNormalOctahedronCanonicalizedTransformTest, Init) { + const Transform transform(15); + ASSERT_TRUE(transform.AreCorrectionsPositive()); +} + +TEST_F(PredictionSchemeNormalOctahedronCanonicalizedTransformTest, + IsInBottomLeft) { + const Transform transform(15); + ASSERT_TRUE(transform.IsInBottomLeft(Point2(0, 0))); + ASSERT_TRUE(transform.IsInBottomLeft(Point2(-1, -1))); + ASSERT_TRUE(transform.IsInBottomLeft(Point2(-7, -7))); + + ASSERT_FALSE(transform.IsInBottomLeft(Point2(1, 1))); + ASSERT_FALSE(transform.IsInBottomLeft(Point2(7, 7))); + ASSERT_FALSE(transform.IsInBottomLeft(Point2(-1, 1))); + ASSERT_FALSE(transform.IsInBottomLeft(Point2(-7, 7))); + ASSERT_FALSE(transform.IsInBottomLeft(Point2(1, -1))); + ASSERT_FALSE(transform.IsInBottomLeft(Point2(7, -7))); +} + +TEST_F(PredictionSchemeNormalOctahedronCanonicalizedTransformTest, + GetRotationCount) { + const Transform transform(15); + TestGetRotationCount(transform, Point2(1, 2), 2); // top right + TestGetRotationCount(transform, Point2(-1, 2), 3); // top left + TestGetRotationCount(transform, Point2(1, -2), 1); // bottom right + TestGetRotationCount(transform, Point2(-1, -2), 0); // bottom left + TestGetRotationCount(transform, Point2(0, 2), 3); // top left + TestGetRotationCount(transform, Point2(0, -2), 1); // bottom right + TestGetRotationCount(transform, Point2(2, 0), 2); // top right + TestGetRotationCount(transform, Point2(-2, 0), 0); // bottom left + TestGetRotationCount(transform, Point2(0, 0), 0); // bottom left +} + 
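// The property GetRotationCount is tested for above can be stated as an
// invariant: rotating any prediction by the returned number of clockwise
// quarter turns lands it in the bottom-left quadrant. A self-contained
// restatement of that invariant (local re-implementations for illustration,
// not the draco classes):
#include <array>
#include <cassert>

using Pt = std::array<int, 2>;

int RotationCount(const Pt &p) {
  if (p[0] == 0) return p[1] == 0 ? 0 : (p[1] > 0 ? 3 : 1);
  if (p[0] > 0) return p[1] >= 0 ? 2 : 1;
  return p[1] <= 0 ? 0 : 3;
}
Pt Rotate(Pt p, int count) {
  for (int i = 0; i < count; ++i) p = Pt{p[1], -p[0]};  // One quarter turn.
  return p;
}
bool InBottomLeft(const Pt &p) {
  return (p[0] == 0 && p[1] == 0) || (p[0] < 0 && p[1] <= 0);
}

int main() {
  const Pt samples[] = {{1, 2}, {-1, 2}, {1, -2}, {-1, -2}, {0, 2},
                        {0, -2}, {2, 0}, {-2, 0}, {0, 0}};
  for (const Pt &p : samples) {
    assert(InBottomLeft(Rotate(p, RotationCount(p))));
  }
  return 0;
}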
+TEST_F(PredictionSchemeNormalOctahedronCanonicalizedTransformTest, + RotateRepresentation) { + const Transform transform(15); + // p top left; shift clockwise by 3 + TestRotateRepresentation(transform, Point2(1, 2), Point2(-3, 1), + Point2(-2, 1), Point2(-1, -3)); // q top right + TestRotateRepresentation(transform, Point2(-1, -2), Point2(-3, 1), + Point2(2, -1), Point2(-1, -3)); // q bottom left + TestRotateRepresentation(transform, Point2(1, -2), Point2(-3, 1), + Point2(2, 1), Point2(-1, -3)); // q bottom right + TestRotateRepresentation(transform, Point2(-1, 2), Point2(-3, 1), + Point2(-2, -1), Point2(-1, -3)); // q top left + // p top right; shift clockwise by 2 (flip) + TestRotateRepresentation(transform, Point2(1, 1), Point2(1, 3), + Point2(-1, -1), Point2(-1, -3)); // q top right + TestRotateRepresentation(transform, Point2(-1, -2), Point2(1, 3), + Point2(1, 2), Point2(-1, -3)); // q bottom left + TestRotateRepresentation(transform, Point2(-1, 2), Point2(1, 3), + Point2(1, -2), Point2(-1, -3)); // q top left + TestRotateRepresentation(transform, Point2(1, -2), Point2(1, 3), + Point2(-1, 2), Point2(-1, -3)); // q bottom right + // p bottom right; shift clockwise by 1 + TestRotateRepresentation(transform, Point2(1, 2), Point2(3, -1), + Point2(2, -1), Point2(-1, -3)); // q top right + TestRotateRepresentation(transform, Point2(1, -2), Point2(3, -1), + Point2(-2, -1), Point2(-1, -3)); // q bottom right + TestRotateRepresentation(transform, Point2(-1, -2), Point2(3, -1), + Point2(-2, 1), Point2(-1, -3)); // q bottom left + TestRotateRepresentation(transform, Point2(-1, 2), Point2(3, -1), + Point2(2, 1), Point2(-1, -3)); // q top left + // p bottom left; no change + TestRotateRepresentation(transform, Point2(1, 2), Point2(-1, -3), + Point2(1, 2), Point2(-1, -3)); // q top right + TestRotateRepresentation(transform, Point2(-1, 2), Point2(-1, -3), + Point2(-1, 2), Point2(-1, -3)); // q top left + TestRotateRepresentation(transform, Point2(1, -2), Point2(-1, -3), + Point2(1, -2), Point2(-1, -3)); // q bottom right + TestRotateRepresentation(transform, Point2(-1, -2), Point2(-1, -3), + Point2(-1, -2), Point2(-1, -3)); // q bottom left +} + +TEST_F(PredictionSchemeNormalOctahedronCanonicalizedTransformTest, + ComputeCorrection) { + const Transform transform(15); + TestComputeCorrection(transform, 0, 0, 0, 0, 0, 0); + TestComputeCorrection(transform, 1, 1, 1, 1, 0, 0); + // inside diamond; p top right + TestComputeCorrection(transform, 3, 4, 1, 2, -2, -2); // q top right + TestComputeCorrection(transform, -3, 4, 1, 2, 4, -2); // q top left + TestComputeCorrection(transform, 3, -4, 1, 2, -2, 6); // q bottom right + TestComputeCorrection(transform, -3, -4, 1, 2, 4, 6); // q bottom left + // inside diamond; p top left + TestComputeCorrection(transform, 3, 4, -1, 2, -2, 4); // q top right + TestComputeCorrection(transform, -3, 4, -1, 2, -2, -2); // q top left + TestComputeCorrection(transform, 3, -4, -1, 2, 6, 4); // q bottom right + TestComputeCorrection(transform, -3, -4, -1, 2, 6, -2); // q bottom left + // inside diamond; p bottom right + TestComputeCorrection(transform, 3, 4, 1, -2, 6, -2); // q top right + TestComputeCorrection(transform, -3, 4, 1, -2, 6, 4); // q top left + TestComputeCorrection(transform, 3, -4, 1, -2, -2, -2); // q bottom right + TestComputeCorrection(transform, -3, -4, 1, -2, -2, 4); // q bottom left + // inside diamond; p bottom left + TestComputeCorrection(transform, 3, 4, -1, -2, 4, 6); // q top right + TestComputeCorrection(transform, -3, 4, -1, -2, -2, 6); // q top 
left + TestComputeCorrection(transform, 3, -4, -1, -2, 4, -2); // q bottom right + TestComputeCorrection(transform, -3, -4, -1, -2, -2, -2); // q bottom left + // outside diamond; p top right + TestComputeCorrection(transform, 1, 2, 5, 4, -2, -4); // q top right + TestComputeCorrection(transform, -1, 2, 5, 4, -7, -4); // q top left + TestComputeCorrection(transform, 1, -2, 5, 4, -2, -7); // q bottom right + TestComputeCorrection(transform, -1, -2, 5, 4, -7, -7); // q bottom left + // outside diamond; p top left + TestComputeCorrection(transform, 1, 2, -5, 4, -4, -7); // q top right + TestComputeCorrection(transform, -1, 2, -5, 4, -4, -2); // q top left + TestComputeCorrection(transform, 1, -2, -5, 4, -7, -7); // q bottom right + TestComputeCorrection(transform, -1, -2, -5, 4, -7, -2); // q bottom left + // outside diamond; p bottom right + TestComputeCorrection(transform, 1, 2, 5, -4, -7, -2); // q top right + TestComputeCorrection(transform, -1, 2, 5, -4, -7, -7); // q top left + TestComputeCorrection(transform, 1, -2, 5, -4, -4, -2); // q bottom right + TestComputeCorrection(transform, -1, -2, 5, -4, -4, -7); // q bottom left + // outside diamond; p bottom left + TestComputeCorrection(transform, 1, 2, -5, -4, -7, -7); // q top right + TestComputeCorrection(transform, -1, 2, -5, -4, -2, -7); // q top left + TestComputeCorrection(transform, 1, -2, -5, -4, -7, -4); // q bottom right + TestComputeCorrection(transform, -1, -2, -5, -4, -2, -4); // q bottom left + + TestComputeCorrection(transform, -1, -2, 7, 7, -5, -6); + TestComputeCorrection(transform, 0, 0, 7, 7, 7, 7); + TestComputeCorrection(transform, -1, -2, 0, -2, 0, 1); +} + +TEST_F(PredictionSchemeNormalOctahedronCanonicalizedTransformTest, Interface) { + const Transform transform(15); + ASSERT_EQ(transform.max_quantized_value(), 15); + ASSERT_EQ(transform.center_value(), 7); + ASSERT_EQ(transform.quantization_bits(), 4); +} + +} // namespace diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_decoding_transform.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_decoding_transform.h new file mode 100644 index 000000000..a1bc4a327 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_decoding_transform.h @@ -0,0 +1,103 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
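// The Interface test above pins down how the octahedral transform's three
// quantities relate: with b quantization bits the maximum quantized value is
// 2^b - 1 and the center value is (2^b - 2) / 2. A compile-time sketch of that
// relationship (helper names are illustrative):
#include <cstdint>

constexpr int32_t MaxQuantizedValue(int bits) { return (1 << bits) - 1; }
constexpr int32_t CenterValue(int bits) {
  return (MaxQuantizedValue(bits) - 1) / 2;
}

static_assert(MaxQuantizedValue(4) == 15, "matches the Interface test");
static_assert(CenterValue(4) == 7, "matches the Interface test");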
+// +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_DECODING_TRANSFORM_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_DECODING_TRANSFORM_H_ + +#include + +#include "draco/compression/attributes/normal_compression_utils.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_base.h" +#include "draco/core/decoder_buffer.h" +#include "draco/core/macros.h" +#include "draco/core/vector_d.h" +#include "draco/draco_features.h" + +namespace draco { + +// Class for converting correction values transformed by the octahedral normal +// transform back to the original values. See the corresponding encoder for more +// details. +template +class PredictionSchemeNormalOctahedronDecodingTransform + : public PredictionSchemeNormalOctahedronTransformBase { + public: + typedef VectorD Point2; + typedef DataTypeT CorrType; + typedef DataTypeT DataType; + + PredictionSchemeNormalOctahedronDecodingTransform() {} + + // Dummy function to fulfill concept. + void Init(int num_components) {} + bool DecodeTransformData(DecoderBuffer *buffer) { + DataTypeT max_quantized_value, center_value; + if (!buffer->Decode(&max_quantized_value)) { + return false; + } + if (buffer->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 2)) { + if (!buffer->Decode(¢er_value)) { + return false; + } + } + (void)center_value; + return this->set_max_quantized_value(max_quantized_value); + } + + inline void ComputeOriginalValue(const DataType *pred_vals, + const CorrType *corr_vals, + DataType *out_orig_vals) const { + DRACO_DCHECK_LE(pred_vals[0], 2 * this->center_value()); + DRACO_DCHECK_LE(pred_vals[1], 2 * this->center_value()); + DRACO_DCHECK_LE(corr_vals[0], 2 * this->center_value()); + DRACO_DCHECK_LE(corr_vals[1], 2 * this->center_value()); + + DRACO_DCHECK_LE(0, pred_vals[0]); + DRACO_DCHECK_LE(0, pred_vals[1]); + DRACO_DCHECK_LE(0, corr_vals[0]); + DRACO_DCHECK_LE(0, corr_vals[1]); + + const Point2 pred = Point2(pred_vals[0], pred_vals[1]); + const Point2 corr = Point2(corr_vals[0], corr_vals[1]); + const Point2 orig = ComputeOriginalValue(pred, corr); + + out_orig_vals[0] = orig[0]; + out_orig_vals[1] = orig[1]; + } + + private: + Point2 ComputeOriginalValue(Point2 pred, const Point2 &corr) const { + const Point2 t(this->center_value(), this->center_value()); + pred = pred - t; + + const bool pred_is_in_diamond = this->IsInDiamond(pred[0], pred[1]); + if (!pred_is_in_diamond) { + this->InvertDiamond(&pred[0], &pred[1]); + } + Point2 orig = pred + corr; + orig[0] = this->ModMax(orig[0]); + orig[1] = this->ModMax(orig[1]); + if (!pred_is_in_diamond) { + this->InvertDiamond(&orig[0], &orig[1]); + } + orig = orig + t; + return orig; + } +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_DECODING_TRANSFORM_H_ +#endif diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_encoding_transform.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_encoding_transform.h new file mode 100644 index 000000000..4abfef669 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_encoding_transform.h @@ -0,0 +1,105 @@ +// Copyright 2016 The Draco Authors. 
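// DecodeTransformData above shows the backward-compatibility pattern used by
// these transforms: a field that newer bitstream versions stopped writing (the
// center value) is still read, and then ignored, when an older stream is
// decoded. A schematic sketch of that pattern; the buffer type, helper name
// and version constant here are all illustrative, not Draco definitions:
#include <cstdint>

struct StreamHeader { uint16_t bitstream_version; };

constexpr uint16_t kVersionThatDroppedField = 0x0202;  // assumed encoding of "2.2"

template <typename BufferT, typename T>
bool DecodeLegacyAwareField(BufferT *buffer, const StreamHeader &header,
                            T *value) {
  if (header.bitstream_version < kVersionThatDroppedField) {
    return buffer->Decode(value);  // Old streams still carry the field.
  }
  return true;  // Newer streams derive it; nothing to read.
}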
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_ENCODING_TRANSFORM_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_ENCODING_TRANSFORM_H_ + +#include + +#include "draco/compression/attributes/normal_compression_utils.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_base.h" +#include "draco/core/encoder_buffer.h" +#include "draco/core/macros.h" +#include "draco/core/vector_d.h" + +namespace draco { + +// The transform works on octahedral coordinates for normals. The square is +// subdivided into four inner triangles (diamond) and four outer triangles. The +// inner triangles are associated with the upper part of the octahedron and the +// outer triangles are associated with the lower part. +// Given a prediction value P and the actual value Q that should be encoded, +// this transform first checks if P is outside the diamond. If so, the outer +// triangles are flipped towards the inside and vice versa. The actual +// correction value is then based on the mapped P and Q values. This tends to +// result in shorter correction vectors. +// This is possible since the P value is also known by the decoder, see also +// ComputeCorrection and ComputeOriginalValue functions. +// Note that the tile is not periodic, which implies that the outer edges can +// not be identified, which requires us to use an odd number of values on each +// axis. +// DataTypeT is expected to be some integral type. +// +template +class PredictionSchemeNormalOctahedronEncodingTransform + : public PredictionSchemeNormalOctahedronTransformBase { + public: + typedef PredictionSchemeNormalOctahedronTransformBase Base; + typedef VectorD Point2; + typedef DataTypeT CorrType; + typedef DataTypeT DataType; + + // We expect the mod value to be of the form 2^b-1. 
+ explicit PredictionSchemeNormalOctahedronEncodingTransform( + DataType max_quantized_value) + : Base(max_quantized_value) {} + + void Init(const DataTypeT *orig_data, int size, int num_components) {} + + bool EncodeTransformData(EncoderBuffer *buffer) { + buffer->Encode(this->max_quantized_value()); + return true; + } + + inline void ComputeCorrection(const DataType *orig_vals, + const DataType *pred_vals, + CorrType *out_corr_vals) const { + DRACO_DCHECK_LE(pred_vals[0], this->center_value() * 2); + DRACO_DCHECK_LE(pred_vals[1], this->center_value() * 2); + DRACO_DCHECK_LE(orig_vals[0], this->center_value() * 2); + DRACO_DCHECK_LE(orig_vals[1], this->center_value() * 2); + DRACO_DCHECK_LE(0, pred_vals[0]); + DRACO_DCHECK_LE(0, pred_vals[1]); + DRACO_DCHECK_LE(0, orig_vals[0]); + DRACO_DCHECK_LE(0, orig_vals[1]); + + const Point2 orig = Point2(orig_vals[0], orig_vals[1]); + const Point2 pred = Point2(pred_vals[0], pred_vals[1]); + const Point2 corr = ComputeCorrection(orig, pred); + + out_corr_vals[0] = corr[0]; + out_corr_vals[1] = corr[1]; + } + + private: + Point2 ComputeCorrection(Point2 orig, Point2 pred) const { + const Point2 t(this->center_value(), this->center_value()); + orig = orig - t; + pred = pred - t; + + if (!this->IsInDiamond(pred[0], pred[1])) { + this->InvertDiamond(&orig[0], &orig[1]); + this->InvertDiamond(&pred[0], &pred[1]); + } + + Point2 corr = orig - pred; + corr[0] = this->MakePositive(corr[0]); + corr[1] = this->MakePositive(corr[1]); + return corr; + } +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_ENCODING_TRANSFORM_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_base.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_base.h new file mode 100644 index 000000000..c9dd7d67b --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_base.h @@ -0,0 +1,90 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_TRANSFORM_BASE_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_TRANSFORM_BASE_H_ + +#include + +#include "draco/compression/attributes/normal_compression_utils.h" +#include "draco/compression/config/compression_shared.h" +#include "draco/core/bit_utils.h" +#include "draco/core/macros.h" +#include "draco/core/vector_d.h" + +namespace draco { + +// Base class containing shared functionality used by both encoding and decoding +// octahedral normal prediction scheme transforms. See the encoding transform +// for more details about the method. 
+template +class PredictionSchemeNormalOctahedronTransformBase { + public: + typedef VectorD Point2; + typedef DataTypeT DataType; + + PredictionSchemeNormalOctahedronTransformBase() {} + // We expect the mod value to be of the form 2^b-1. + explicit PredictionSchemeNormalOctahedronTransformBase( + DataType max_quantized_value) { + this->set_max_quantized_value(max_quantized_value); + } + + static constexpr PredictionSchemeTransformType GetType() { + return PREDICTION_TRANSFORM_NORMAL_OCTAHEDRON; + } + + // We can return true as we keep correction values positive. + bool AreCorrectionsPositive() const { return true; } + + inline DataTypeT max_quantized_value() const { + return octahedron_tool_box_.max_quantized_value(); + } + inline DataTypeT center_value() const { + return octahedron_tool_box_.center_value(); + } + inline int32_t quantization_bits() const { + return octahedron_tool_box_.quantization_bits(); + } + + protected: + inline bool set_max_quantized_value(DataTypeT max_quantized_value) { + if (max_quantized_value % 2 == 0) { + return false; + } + int q = MostSignificantBit(max_quantized_value) + 1; + return octahedron_tool_box_.SetQuantizationBits(q); + } + + bool IsInDiamond(DataTypeT s, DataTypeT t) const { + return octahedron_tool_box_.IsInDiamond(s, t); + } + void InvertDiamond(DataTypeT *s, DataTypeT *t) const { + return octahedron_tool_box_.InvertDiamond(s, t); + } + + int32_t ModMax(int32_t x) const { return octahedron_tool_box_.ModMax(x); } + + // For correction values. + int32_t MakePositive(int32_t x) const { + return octahedron_tool_box_.MakePositive(x); + } + + private: + OctahedronToolBox octahedron_tool_box_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_TRANSFORM_BASE_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_test.cc b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_test.cc new file mode 100644 index 000000000..1001b19fa --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_test.cc @@ -0,0 +1,71 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
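To spell out the arithmetic behind set_max_quantized_value() above: the mod value must be odd and of the form 2^b - 1, b then becomes the number of quantization bits, and the grid center sits at (max_quantized_value - 1) / 2. The standalone sketch below (not Draco code) matches the expectations of the Interface test further down.

#include <cassert>
#include <cstdint>

// Number of bits b such that 2^b - 1 >= max_quantized_value; for values of
// the form 2^b - 1 this equals the index of the most significant bit plus
// one, mirroring the MostSignificantBit(...) + 1 computation above.
static int QuantizationBitsSketch(uint32_t max_quantized_value) {
  int b = 0;
  while (((1u << b) - 1u) < max_quantized_value) {
    ++b;
  }
  return b;
}

int main() {
  const uint32_t max_quantized_value = 15;  // 2^4 - 1.
  assert(max_quantized_value % 2 == 1);     // Even values are rejected above.
  assert(QuantizationBitsSketch(max_quantized_value) == 4);
  assert((max_quantized_value - 1) / 2 == 7);  // The expected center_value().
  return 0;
}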
+// +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_encoding_transform.h" +#include "draco/core/draco_test_base.h" + +namespace { + +class PredictionSchemeNormalOctahedronTransformTest : public ::testing::Test { + protected: + typedef draco::PredictionSchemeNormalOctahedronEncodingTransform + Transform; + typedef Transform::Point2 Point2; + + void TestComputeCorrection(const Transform &transform, const int32_t &ox, + const int32_t &oy, const int32_t &px, + const int32_t &py, const int32_t &cx, + const int32_t &cy) { + const int32_t o[2] = {ox + 7, oy + 7}; + const int32_t p[2] = {px + 7, py + 7}; + int32_t corr[2] = {500, 500}; + transform.ComputeCorrection(o, p, corr); + ASSERT_EQ(corr[0], (cx + 15) % 15); + ASSERT_EQ(corr[1], (cy + 15) % 15); + } +}; + +TEST_F(PredictionSchemeNormalOctahedronTransformTest, Init) { + const Transform transform(15); + ASSERT_TRUE(transform.AreCorrectionsPositive()); +} + +TEST_F(PredictionSchemeNormalOctahedronTransformTest, ComputeCorrections) { + const Transform transform(15); + // checks inside diamond + TestComputeCorrection(transform, 0, 0, 0, 0, 0, 0); + TestComputeCorrection(transform, 1, 1, 1, 1, 0, 0); + TestComputeCorrection(transform, 3, 4, 1, 1, 2, 3); + TestComputeCorrection(transform, -1, -1, -1, -1, 0, 0); + TestComputeCorrection(transform, -3, -4, -1, -1, -2, -3); + // checks outside diamond + TestComputeCorrection(transform, 4, 4, 4, 4, 0, 0); + TestComputeCorrection(transform, 5, 6, 4, 4, -2, -1); + TestComputeCorrection(transform, 3, 2, 4, 4, 2, 1); + // checks on outer edges + TestComputeCorrection(transform, 7, 7, 4, 4, -3, -3); + TestComputeCorrection(transform, 6, 7, 4, 4, -3, -2); + TestComputeCorrection(transform, -6, 7, 4, 4, -3, -2); + TestComputeCorrection(transform, 7, 6, 4, 4, -2, -3); + TestComputeCorrection(transform, 7, -6, 4, 4, -2, -3); +} + +TEST_F(PredictionSchemeNormalOctahedronTransformTest, Interface) { + const Transform transform(15); + ASSERT_EQ(transform.max_quantized_value(), 15); + ASSERT_EQ(transform.center_value(), 7); + ASSERT_EQ(transform.quantization_bits(), 4); +} + +} // namespace diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_decoding_transform.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_decoding_transform.h new file mode 100644 index 000000000..e100c738a --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_decoding_transform.h @@ -0,0 +1,88 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
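One detail of TestComputeCorrection() above is worth noting: because AreCorrectionsPositive() is true, the expected corrections are folded into the non-negative range with (c + 15) % 15, modulo 15 being the max_quantized_value used by the test. A tiny standalone sketch of that folding; the transform's own MakePositive() lives in OctahedronToolBox and may be implemented differently.

#include <cassert>

// Folds a signed correction into [0, modulus), matching the (c + 15) % 15
// expressions used for the expected values in the test above.
static int FoldPositiveSketch(int c, int modulus) {
  return ((c % modulus) + modulus) % modulus;
}

int main() {
  const int modulus = 15;  // The max_quantized_value used by the test.
  assert(FoldPositiveSketch(2, modulus) == 2);    // Non-negative: unchanged.
  assert(FoldPositiveSketch(-1, modulus) == 14);  // -1 is stored as 14.
  assert(FoldPositiveSketch(-3, modulus) == 12);
  return 0;
}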
+// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_WRAP_DECODING_TRANSFORM_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_WRAP_DECODING_TRANSFORM_H_ + +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_transform_base.h" +#include "draco/core/decoder_buffer.h" + +namespace draco { + +// PredictionSchemeWrapDecodingTransform unwraps values encoded with the +// PredictionSchemeWrapEncodingTransform. +// See prediction_scheme_wrap_transform_base.h for more details about the +// method. +template +class PredictionSchemeWrapDecodingTransform + : public PredictionSchemeWrapTransformBase { + public: + typedef CorrTypeT CorrType; + PredictionSchemeWrapDecodingTransform() {} + + // Computes the original value from the input predicted value and the decoded + // corrections. Values out of the bounds of the input values are unwrapped. + inline void ComputeOriginalValue(const DataTypeT *predicted_vals, + const CorrTypeT *corr_vals, + DataTypeT *out_original_vals) const { + // For now we assume both |DataTypeT| and |CorrTypeT| are equal. + static_assert(std::is_same::value, + "Predictions and corrections must have the same type."); + + // The only valid implementation right now is for int32_t. + static_assert(std::is_same::value, + "Only int32_t is supported for predicted values."); + + predicted_vals = this->ClampPredictedValue(predicted_vals); + + // Perform the wrapping using unsigned coordinates to avoid potential signed + // integer overflows caused by malformed input. + const uint32_t *const uint_predicted_vals = + reinterpret_cast(predicted_vals); + const uint32_t *const uint_corr_vals = + reinterpret_cast(corr_vals); + for (int i = 0; i < this->num_components(); ++i) { + out_original_vals[i] = + static_cast(uint_predicted_vals[i] + uint_corr_vals[i]); + if (out_original_vals[i] > this->max_value()) { + out_original_vals[i] -= this->max_dif(); + } else if (out_original_vals[i] < this->min_value()) { + out_original_vals[i] += this->max_dif(); + } + } + } + + bool DecodeTransformData(DecoderBuffer *buffer) { + DataTypeT min_value, max_value; + if (!buffer->Decode(&min_value)) { + return false; + } + if (!buffer->Decode(&max_value)) { + return false; + } + if (min_value > max_value) { + return false; + } + this->set_min_value(min_value); + this->set_max_value(max_value); + if (!this->InitCorrectionBounds()) { + return false; + } + return true; + } +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_WRAP_DECODING_TRANSFORM_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_encoding_transform.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_encoding_transform.h new file mode 100644 index 000000000..1f5e8b135 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_encoding_transform.h @@ -0,0 +1,81 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
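The reinterpret_casts in ComputeOriginalValue() above exist because signed integer overflow is undefined behaviour in C++, and a malformed correction value must not be able to trigger it; unsigned addition simply wraps modulo 2^32. A minimal illustration of the same trick in isolation (a sketch, not Draco code):

#include <cstdint>

// Adding two int32_t values directly can overflow, which is undefined
// behaviour. Performing the addition on uint32_t is well defined (it wraps
// modulo 2^32); the sum is then cast back and range-checked, which is what
// the decoding transform above does.
static int32_t OverflowSafeAdd(int32_t a, int32_t b) {
  return static_cast<int32_t>(static_cast<uint32_t>(a) +
                              static_cast<uint32_t>(b));
}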
+// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_WRAP_ENCODING_TRANSFORM_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_WRAP_ENCODING_TRANSFORM_H_ + +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_transform_base.h" +#include "draco/core/encoder_buffer.h" + +namespace draco { + +// PredictionSchemeWrapEncodingTransform wraps input values using the wrapping +// scheme described in: prediction_scheme_wrap_transform_base.h . +template +class PredictionSchemeWrapEncodingTransform + : public PredictionSchemeWrapTransformBase { + public: + typedef CorrTypeT CorrType; + PredictionSchemeWrapEncodingTransform() {} + + void Init(const DataTypeT *orig_data, int size, int num_components) { + PredictionSchemeWrapTransformBase::Init(num_components); + // Go over the original values and compute the bounds. + if (size == 0) { + return; + } + DataTypeT min_value = orig_data[0]; + DataTypeT max_value = min_value; + for (int i = 1; i < size; ++i) { + if (orig_data[i] < min_value) { + min_value = orig_data[i]; + } else if (orig_data[i] > max_value) { + max_value = orig_data[i]; + } + } + this->set_min_value(min_value); + this->set_max_value(max_value); + this->InitCorrectionBounds(); + } + + // Computes the corrections based on the input original value and the + // predicted value. Out of bound correction values are wrapped around the max + // range of input values. + inline void ComputeCorrection(const DataTypeT *original_vals, + const DataTypeT *predicted_vals, + CorrTypeT *out_corr_vals) const { + for (int i = 0; i < this->num_components(); ++i) { + predicted_vals = this->ClampPredictedValue(predicted_vals); + out_corr_vals[i] = original_vals[i] - predicted_vals[i]; + // Wrap around if needed. + DataTypeT &corr_val = out_corr_vals[i]; + if (corr_val < this->min_correction()) { + corr_val += this->max_dif(); + } else if (corr_val > this->max_correction()) { + corr_val -= this->max_dif(); + } + } + } + + bool EncodeTransformData(EncoderBuffer *buffer) { + // Store the input value range as it is needed by the decoder. + buffer->Encode(this->min_value()); + buffer->Encode(this->max_value()); + return true; + } +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_WRAP_ENCODING_TRANSFORM_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_transform_base.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_transform_base.h new file mode 100644 index 000000000..26f61fbaf --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_transform_base.h @@ -0,0 +1,120 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
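A worked example of the wrap transform pair: the encoder-side ComputeCorrection() above and the decoder-side ComputeOriginalValue() from the previous file, for a hypothetical attribute component whose values lie in [0, 9]. The bound computation follows the InitCorrectionBounds() logic of the shared base class that comes next; this is a standalone sketch, not Draco code.

#include <cassert>

int main() {
  // Hypothetical value range of one encoded attribute component.
  const int min_value = 0, max_value = 9;
  const int max_dif = 1 + (max_value - min_value);   // 10
  int max_correction = max_dif / 2;                  // 5
  const int min_correction = -max_correction;        // -5
  if ((max_dif & 1) == 0) {
    max_correction -= 1;                             // 4 for an even range.
  }

  // Encoder side: wrap the raw difference into [min_correction, max_correction].
  const int orig = 1, pred = 9;
  int corr = orig - pred;                            // -8
  if (corr < min_correction) {
    corr += max_dif;                                 // -8 -> 2
  } else if (corr > max_correction) {
    corr -= max_dif;
  }
  assert(corr == 2);

  // Decoder side: add the correction and unwrap back into [min_value, max_value].
  int decoded = pred + corr;                         // 11
  if (decoded > max_value) {
    decoded -= max_dif;                              // 11 -> 1
  } else if (decoded < min_value) {
    decoded += max_dif;
  }
  assert(decoded == orig);
  return 0;
}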
+// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_WRAP_TRANSFORM_BASE_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_WRAP_TRANSFORM_BASE_H_ + +#include +#include + +#include "draco/compression/config/compression_shared.h" +#include "draco/core/macros.h" + +namespace draco { + +// PredictionSchemeWrapTransform uses the min and max bounds of the original +// data to wrap stored correction values around these bounds centered at 0, +// i.e., when the range of the original values O is between and +// N = MAX-MIN, we can then store any correction X = O - P, as: +// X + N, if X < -N / 2 +// X - N, if X > N / 2 +// X otherwise +// To unwrap this value, the decoder then simply checks whether the final +// corrected value F = P + X is out of the bounds of the input values. +// All out of bounds values are unwrapped using +// F + N, if F < MIN +// F - N, if F > MAX +// This wrapping can reduce the number of unique values, which translates to a +// better entropy of the stored values and better compression rates. +template +class PredictionSchemeWrapTransformBase { + public: + PredictionSchemeWrapTransformBase() + : num_components_(0), + min_value_(0), + max_value_(0), + max_dif_(0), + max_correction_(0), + min_correction_(0) {} + + static constexpr PredictionSchemeTransformType GetType() { + return PREDICTION_TRANSFORM_WRAP; + } + + void Init(int num_components) { + num_components_ = num_components; + clamped_value_.resize(num_components); + } + + bool AreCorrectionsPositive() const { return false; } + + inline const DataTypeT *ClampPredictedValue( + const DataTypeT *predicted_val) const { + for (int i = 0; i < this->num_components(); ++i) { + if (predicted_val[i] > max_value_) { + clamped_value_[i] = max_value_; + } else if (predicted_val[i] < min_value_) { + clamped_value_[i] = min_value_; + } else { + clamped_value_[i] = predicted_val[i]; + } + } + return &clamped_value_[0]; + } + + // TODO(hemmer): Consider refactoring to avoid this dummy. + int quantization_bits() const { + DRACO_DCHECK(false); + return -1; + } + + protected: + bool InitCorrectionBounds() { + const int64_t dif = + static_cast(max_value_) - static_cast(min_value_); + if (dif < 0 || dif >= std::numeric_limits::max()) { + return false; + } + max_dif_ = 1 + static_cast(dif); + max_correction_ = max_dif_ / 2; + min_correction_ = -max_correction_; + if ((max_dif_ & 1) == 0) { + max_correction_ -= 1; + } + return true; + } + + inline int num_components() const { return num_components_; } + inline DataTypeT min_value() const { return min_value_; } + inline void set_min_value(const DataTypeT &v) { min_value_ = v; } + inline DataTypeT max_value() const { return max_value_; } + inline void set_max_value(const DataTypeT &v) { max_value_ = v; } + inline DataTypeT max_dif() const { return max_dif_; } + inline DataTypeT min_correction() const { return min_correction_; } + inline DataTypeT max_correction() const { return max_correction_; } + + private: + int num_components_; + DataTypeT min_value_; + DataTypeT max_value_; + DataTypeT max_dif_; + DataTypeT max_correction_; + DataTypeT min_correction_; + // This is in fact just a tmp variable to avoid reallocation. 
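One subtlety of the base class above: ClampPredictedValue() is applied on both the encoder and the decoder side, so even a prediction that falls outside [min_value, max_value] yields a correction that decodes back to the exact original. A trimmed-down sketch of that clamp for a single component (illustrative only):

#include <algorithm>

// Clamps a predicted value into the range of the original data. Because the
// encoder and the decoder apply the identical clamp before computing or
// applying the correction, the round trip stays lossless.
static int ClampPredictedSketch(int predicted, int min_value, int max_value) {
  return std::min(max_value, std::max(min_value, predicted));
}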
+ mutable std::vector clamped_value_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_WRAP_TRANSFORM_BASE_H_ diff --git a/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoder.cc b/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoder.cc new file mode 100644 index 000000000..b4ba24f2d --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoder.cc @@ -0,0 +1,118 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/attributes/sequential_attribute_decoder.h" + +namespace draco { + +SequentialAttributeDecoder::SequentialAttributeDecoder() + : decoder_(nullptr), attribute_(nullptr), attribute_id_(-1) {} + +bool SequentialAttributeDecoder::Init(PointCloudDecoder *decoder, + int attribute_id) { + decoder_ = decoder; + attribute_ = decoder->point_cloud()->attribute(attribute_id); + attribute_id_ = attribute_id; + return true; +} + +bool SequentialAttributeDecoder::InitializeStandalone( + PointAttribute *attribute) { + attribute_ = attribute; + attribute_id_ = -1; + return true; +} + +bool SequentialAttributeDecoder::DecodePortableAttribute( + const std::vector &point_ids, DecoderBuffer *in_buffer) { + if (attribute_->num_components() <= 0 || + !attribute_->Reset(point_ids.size())) { + return false; + } + if (!DecodeValues(point_ids, in_buffer)) { + return false; + } + return true; +} + +bool SequentialAttributeDecoder::DecodeDataNeededByPortableTransform( + const std::vector &point_ids, DecoderBuffer *in_buffer) { + // Default implementation does not apply any transform. + return true; +} + +bool SequentialAttributeDecoder::TransformAttributeToOriginalFormat( + const std::vector &point_ids) { + // Default implementation does not apply any transform. + return true; +} + +const PointAttribute *SequentialAttributeDecoder::GetPortableAttribute() { + // If needed, copy point to attribute value index mapping from the final + // attribute to the portable attribute. + if (!attribute_->is_mapping_identity() && portable_attribute_ && + portable_attribute_->is_mapping_identity()) { + portable_attribute_->SetExplicitMapping(attribute_->indices_map_size()); + for (PointIndex i(0); + i < static_cast(attribute_->indices_map_size()); ++i) { + portable_attribute_->SetPointMapEntry(i, attribute_->mapped_index(i)); + } + } + return portable_attribute_.get(); +} + +bool SequentialAttributeDecoder::InitPredictionScheme( + PredictionSchemeInterface *ps) { + for (int i = 0; i < ps->GetNumParentAttributes(); ++i) { + const int att_id = decoder_->point_cloud()->GetNamedAttributeId( + ps->GetParentAttributeType(i)); + if (att_id == -1) { + return false; // Requested attribute does not exist. 
+ } +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED + if (decoder_->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 0)) { + if (!ps->SetParentAttribute(decoder_->point_cloud()->attribute(att_id))) { + return false; + } + } else +#endif + { + const PointAttribute *const pa = decoder_->GetPortableAttribute(att_id); + if (pa == nullptr || !ps->SetParentAttribute(pa)) { + return false; + } + } + } + return true; +} + +bool SequentialAttributeDecoder::DecodeValues( + const std::vector &point_ids, DecoderBuffer *in_buffer) { + const int32_t num_values = static_cast(point_ids.size()); + const int entry_size = static_cast(attribute_->byte_stride()); + std::unique_ptr value_data_ptr(new uint8_t[entry_size]); + uint8_t *const value_data = value_data_ptr.get(); + int out_byte_pos = 0; + // Decode raw attribute values in their original format. + for (int i = 0; i < num_values; ++i) { + if (!in_buffer->Decode(value_data, entry_size)) { + return false; + } + attribute_->buffer()->Write(out_byte_pos, value_data, entry_size); + out_byte_pos += entry_size; + } + return true; +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoder.h b/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoder.h new file mode 100644 index 000000000..d48119465 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoder.h @@ -0,0 +1,86 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_ATTRIBUTE_DECODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_ATTRIBUTE_DECODER_H_ + +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_interface.h" +#include "draco/compression/point_cloud/point_cloud_decoder.h" +#include "draco/draco_features.h" + +namespace draco { + +// A base class for decoding attribute values encoded by the +// SequentialAttributeEncoder. +class SequentialAttributeDecoder { + public: + SequentialAttributeDecoder(); + virtual ~SequentialAttributeDecoder() = default; + + virtual bool Init(PointCloudDecoder *decoder, int attribute_id); + + // Initialization for a specific attribute. This can be used mostly for + // standalone decoding of an attribute without an PointCloudDecoder. + virtual bool InitializeStandalone(PointAttribute *attribute); + + // Performs lossless decoding of the portable attribute data. + virtual bool DecodePortableAttribute(const std::vector &point_ids, + DecoderBuffer *in_buffer); + + // Decodes any data needed to revert portable transform of the decoded + // attribute. + virtual bool DecodeDataNeededByPortableTransform( + const std::vector &point_ids, DecoderBuffer *in_buffer); + + // Reverts transformation performed by encoder in + // SequentialAttributeEncoder::TransformAttributeToPortableFormat() method. 
+ virtual bool TransformAttributeToOriginalFormat( + const std::vector &point_ids); + + const PointAttribute *GetPortableAttribute(); + + const PointAttribute *attribute() const { return attribute_; } + PointAttribute *attribute() { return attribute_; } + int attribute_id() const { return attribute_id_; } + PointCloudDecoder *decoder() const { return decoder_; } + + protected: + // Should be used to initialize newly created prediction scheme. + // Returns false when the initialization failed (in which case the scheme + // cannot be used). + virtual bool InitPredictionScheme(PredictionSchemeInterface *ps); + + // The actual implementation of the attribute decoding. Should be overridden + // for specialized decoders. + virtual bool DecodeValues(const std::vector &point_ids, + DecoderBuffer *in_buffer); + + void SetPortableAttribute(std::unique_ptr att) { + portable_attribute_ = std::move(att); + } + + PointAttribute *portable_attribute() { return portable_attribute_.get(); } + + private: + PointCloudDecoder *decoder_; + PointAttribute *attribute_; + int attribute_id_; + + // Storage for decoded portable attribute (after lossless decoding). + std::unique_ptr portable_attribute_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_ATTRIBUTE_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoders_controller.cc b/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoders_controller.cc new file mode 100644 index 000000000..0e5e26bca --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoders_controller.cc @@ -0,0 +1,149 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/attributes/sequential_attribute_decoders_controller.h" +#ifdef DRACO_NORMAL_ENCODING_SUPPORTED +#include "draco/compression/attributes/sequential_normal_attribute_decoder.h" +#endif +#include "draco/compression/attributes/sequential_quantization_attribute_decoder.h" +#include "draco/compression/config/compression_shared.h" + +namespace draco { + +SequentialAttributeDecodersController::SequentialAttributeDecodersController( + std::unique_ptr sequencer) + : sequencer_(std::move(sequencer)) {} + +bool SequentialAttributeDecodersController::DecodeAttributesDecoderData( + DecoderBuffer *buffer) { + if (!AttributesDecoder::DecodeAttributesDecoderData(buffer)) { + return false; + } + // Decode unique ids of all sequential encoders and create them. + const int32_t num_attributes = GetNumAttributes(); + sequential_decoders_.resize(num_attributes); + for (int i = 0; i < num_attributes; ++i) { + uint8_t decoder_type; + if (!buffer->Decode(&decoder_type)) { + return false; + } + // Create the decoder from the id. 
+ sequential_decoders_[i] = CreateSequentialDecoder(decoder_type); + if (!sequential_decoders_[i]) { + return false; + } + if (!sequential_decoders_[i]->Init(GetDecoder(), GetAttributeId(i))) { + return false; + } + } + return true; +} + +bool SequentialAttributeDecodersController::DecodeAttributes( + DecoderBuffer *buffer) { + if (!sequencer_ || !sequencer_->GenerateSequence(&point_ids_)) { + return false; + } + // Initialize point to attribute value mapping for all decoded attributes. + const int32_t num_attributes = GetNumAttributes(); + for (int i = 0; i < num_attributes; ++i) { + PointAttribute *const pa = + GetDecoder()->point_cloud()->attribute(GetAttributeId(i)); + if (!sequencer_->UpdatePointToAttributeIndexMapping(pa)) { + return false; + } + } + return AttributesDecoder::DecodeAttributes(buffer); +} + +bool SequentialAttributeDecodersController::DecodePortableAttributes( + DecoderBuffer *in_buffer) { + const int32_t num_attributes = GetNumAttributes(); + for (int i = 0; i < num_attributes; ++i) { + if (!sequential_decoders_[i]->DecodePortableAttribute(point_ids_, + in_buffer)) { + return false; + } + } + return true; +} + +bool SequentialAttributeDecodersController:: + DecodeDataNeededByPortableTransforms(DecoderBuffer *in_buffer) { + const int32_t num_attributes = GetNumAttributes(); + for (int i = 0; i < num_attributes; ++i) { + if (!sequential_decoders_[i]->DecodeDataNeededByPortableTransform( + point_ids_, in_buffer)) { + return false; + } + } + return true; +} + +bool SequentialAttributeDecodersController:: + TransformAttributesToOriginalFormat() { + const int32_t num_attributes = GetNumAttributes(); + for (int i = 0; i < num_attributes; ++i) { + // Check whether the attribute transform should be skipped. + if (GetDecoder()->options()) { + const PointAttribute *const attribute = + sequential_decoders_[i]->attribute(); + const PointAttribute *const portable_attribute = + sequential_decoders_[i]->GetPortableAttribute(); + if (portable_attribute && + GetDecoder()->options()->GetAttributeBool( + attribute->attribute_type(), "skip_attribute_transform", false)) { + // Attribute transform should not be performed. In this case, we replace + // the output geometry attribute with the portable attribute. + // TODO(ostava): We can potentially avoid this copy by introducing a new + // mechanism that would allow to use the final attributes as portable + // attributes for predictors that may need them. + sequential_decoders_[i]->attribute()->CopyFrom(*portable_attribute); + continue; + } + } + if (!sequential_decoders_[i]->TransformAttributeToOriginalFormat( + point_ids_)) { + return false; + } + } + return true; +} + +std::unique_ptr +SequentialAttributeDecodersController::CreateSequentialDecoder( + uint8_t decoder_type) { + switch (decoder_type) { + case SEQUENTIAL_ATTRIBUTE_ENCODER_GENERIC: + return std::unique_ptr( + new SequentialAttributeDecoder()); + case SEQUENTIAL_ATTRIBUTE_ENCODER_INTEGER: + return std::unique_ptr( + new SequentialIntegerAttributeDecoder()); + case SEQUENTIAL_ATTRIBUTE_ENCODER_QUANTIZATION: + return std::unique_ptr( + new SequentialQuantizationAttributeDecoder()); +#ifdef DRACO_NORMAL_ENCODING_SUPPORTED + case SEQUENTIAL_ATTRIBUTE_ENCODER_NORMALS: + return std::unique_ptr( + new SequentialNormalAttributeDecoder()); +#endif + default: + break; + } + // Unknown or unsupported decoder type. 
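The skip_attribute_transform branch above lets a caller receive the quantized (portable) attribute values instead of the fully transformed output. The sketch below shows how such an option could be set; SetAttributeBool is only assumed here as the natural counterpart of the GetAttributeBool call used above and is not defined in this patch, so verify it against the DecoderOptions API before relying on it.

#include "draco/attributes/geometry_attribute.h"
#include "draco/compression/config/decoder_options.h"

// Request that POSITION attributes be left in their quantized, portable form
// instead of being transformed back to the original format after decoding.
// NOTE: SetAttributeBool is an assumption mirroring the GetAttributeBool call
// in the controller above; it is not part of this patch.
void RequestQuantizedPositions(draco::DecoderOptions *options) {
  options->SetAttributeBool(draco::GeometryAttribute::POSITION,
                            "skip_attribute_transform", true);
}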
+ return nullptr; +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoders_controller.h b/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoders_controller.h new file mode 100644 index 000000000..abc1f3685 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoders_controller.h @@ -0,0 +1,61 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_ATTRIBUTE_DECODERS_CONTROLLER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_ATTRIBUTE_DECODERS_CONTROLLER_H_ + +#include "draco/compression/attributes/attributes_decoder.h" +#include "draco/compression/attributes/points_sequencer.h" +#include "draco/compression/attributes/sequential_attribute_decoder.h" + +namespace draco { + +// A basic implementation of an attribute decoder that decodes data encoded by +// the SequentialAttributeEncodersController class. The +// SequentialAttributeDecodersController creates a single +// AttributeIndexedValuesDecoder for each of the decoded attribute, where the +// type of the values decoder is determined by the unique identifier that was +// encoded by the encoder. +class SequentialAttributeDecodersController : public AttributesDecoder { + public: + explicit SequentialAttributeDecodersController( + std::unique_ptr sequencer); + + bool DecodeAttributesDecoderData(DecoderBuffer *buffer) override; + bool DecodeAttributes(DecoderBuffer *buffer) override; + const PointAttribute *GetPortableAttribute( + int32_t point_attribute_id) override { + const int32_t loc_id = GetLocalIdForPointAttribute(point_attribute_id); + if (loc_id < 0) { + return nullptr; + } + return sequential_decoders_[loc_id]->GetPortableAttribute(); + } + + protected: + bool DecodePortableAttributes(DecoderBuffer *in_buffer) override; + bool DecodeDataNeededByPortableTransforms(DecoderBuffer *in_buffer) override; + bool TransformAttributesToOriginalFormat() override; + virtual std::unique_ptr CreateSequentialDecoder( + uint8_t decoder_type); + + private: + std::vector> sequential_decoders_; + std::vector point_ids_; + std::unique_ptr sequencer_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_ATTRIBUTE_DECODERS_CONTROLLER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoder.cc b/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoder.cc new file mode 100644 index 000000000..6bde3eeb3 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoder.cc @@ -0,0 +1,108 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/attributes/sequential_attribute_encoder.h" + +namespace draco { + +SequentialAttributeEncoder::SequentialAttributeEncoder() + : encoder_(nullptr), + attribute_(nullptr), + attribute_id_(-1), + is_parent_encoder_(false) {} + +bool SequentialAttributeEncoder::Init(PointCloudEncoder *encoder, + int attribute_id) { + encoder_ = encoder; + attribute_ = encoder_->point_cloud()->attribute(attribute_id); + attribute_id_ = attribute_id; + return true; +} + +bool SequentialAttributeEncoder::InitializeStandalone( + PointAttribute *attribute) { + attribute_ = attribute; + attribute_id_ = -1; + return true; +} + +bool SequentialAttributeEncoder::TransformAttributeToPortableFormat( + const std::vector &point_ids) { + // Default implementation doesn't transform the input data. + return true; +} + +bool SequentialAttributeEncoder::EncodePortableAttribute( + const std::vector &point_ids, EncoderBuffer *out_buffer) { + // Lossless encoding of the input values. + if (!EncodeValues(point_ids, out_buffer)) { + return false; + } + return true; +} + +bool SequentialAttributeEncoder::EncodeDataNeededByPortableTransform( + EncoderBuffer *out_buffer) { + // Default implementation doesn't transform the input data. + return true; +} + +bool SequentialAttributeEncoder::EncodeValues( + const std::vector &point_ids, EncoderBuffer *out_buffer) { + const int entry_size = static_cast(attribute_->byte_stride()); + const std::unique_ptr value_data_ptr(new uint8_t[entry_size]); + uint8_t *const value_data = value_data_ptr.get(); + // Encode all attribute values in their native raw format. + for (uint32_t i = 0; i < point_ids.size(); ++i) { + const AttributeValueIndex entry_id = attribute_->mapped_index(point_ids[i]); + attribute_->GetValue(entry_id, value_data); + out_buffer->Encode(value_data, entry_size); + } + return true; +} + +void SequentialAttributeEncoder::MarkParentAttribute() { + is_parent_encoder_ = true; +} + +bool SequentialAttributeEncoder::InitPredictionScheme( + PredictionSchemeInterface *ps) { + for (int i = 0; i < ps->GetNumParentAttributes(); ++i) { + const int att_id = encoder_->point_cloud()->GetNamedAttributeId( + ps->GetParentAttributeType(i)); + if (att_id == -1) { + return false; // Requested attribute does not exist. + } + parent_attributes_.push_back(att_id); + encoder_->MarkParentAttribute(att_id); + } + return true; +} + +bool SequentialAttributeEncoder::SetPredictionSchemeParentAttributes( + PredictionSchemeInterface *ps) { + for (int i = 0; i < ps->GetNumParentAttributes(); ++i) { + const int att_id = encoder_->point_cloud()->GetNamedAttributeId( + ps->GetParentAttributeType(i)); + if (att_id == -1) { + return false; // Requested attribute does not exist. 
+ } + if (!ps->SetParentAttribute(encoder_->GetPortableAttribute(att_id))) { + return false; + } + } + return true; +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoder.h b/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoder.h new file mode 100644 index 000000000..00f62db89 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoder.h @@ -0,0 +1,134 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_ATTRIBUTE_ENCODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_ATTRIBUTE_ENCODER_H_ + +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_interface.h" +#include "draco/compression/point_cloud/point_cloud_encoder.h" + +namespace draco { + +// A base class for encoding attribute values of a single attribute using a +// given sequence of point ids. The default implementation encodes all attribute +// values directly to the buffer but derived classes can perform any custom +// encoding (such as quantization) by overriding the EncodeValues() method. +class SequentialAttributeEncoder { + public: + SequentialAttributeEncoder(); + virtual ~SequentialAttributeEncoder() = default; + + // Method that can be used for custom initialization of an attribute encoder, + // such as creation of prediction schemes and initialization of attribute + // encoder dependencies. + // |encoder| is the parent PointCloudEncoder, + // |attribute_id| is the id of the attribute that is being encoded by this + // encoder. + // This method is automatically called by the PointCloudEncoder after all + // attribute encoders are created and it should not be called explicitly from + // other places. + virtual bool Init(PointCloudEncoder *encoder, int attribute_id); + + // Initialization for a specific attribute. This can be used mostly for + // standalone encoding of an attribute without an PointCloudEncoder. + virtual bool InitializeStandalone(PointAttribute *attribute); + + // Transforms attribute data into format that is going to be encoded + // losslessly. The transform itself can be lossy. + virtual bool TransformAttributeToPortableFormat( + const std::vector &point_ids); + + // Performs lossless encoding of the transformed attribute data. + virtual bool EncodePortableAttribute(const std::vector &point_ids, + EncoderBuffer *out_buffer); + + // Encodes any data related to the portable attribute transform. 
+ virtual bool EncodeDataNeededByPortableTransform(EncoderBuffer *out_buffer); + + virtual bool IsLossyEncoder() const { return false; } + + int NumParentAttributes() const { + return static_cast(parent_attributes_.size()); + } + int GetParentAttributeId(int i) const { return parent_attributes_[i]; } + + const PointAttribute *GetPortableAttribute() const { + if (portable_attribute_ != nullptr) { + return portable_attribute_.get(); + } + return attribute(); + } + + // Called when this attribute encoder becomes a parent encoder of another + // encoder. + void MarkParentAttribute(); + + virtual uint8_t GetUniqueId() const { + return SEQUENTIAL_ATTRIBUTE_ENCODER_GENERIC; + } + + const PointAttribute *attribute() const { return attribute_; } + int attribute_id() const { return attribute_id_; } + PointCloudEncoder *encoder() const { return encoder_; } + + protected: + // Should be used to initialize newly created prediction scheme. + // Returns false when the initialization failed (in which case the scheme + // cannot be used). + virtual bool InitPredictionScheme(PredictionSchemeInterface *ps); + + // Sets parent attributes for a given prediction scheme. Must be called + // after all prediction schemes are initialized, but before the prediction + // scheme is used. + virtual bool SetPredictionSchemeParentAttributes( + PredictionSchemeInterface *ps); + + // Encodes all attribute values in the specified order. Should be overridden + // for specialized encoders. + virtual bool EncodeValues(const std::vector &point_ids, + EncoderBuffer *out_buffer); + + bool is_parent_encoder() const { return is_parent_encoder_; } + + void SetPortableAttribute(std::unique_ptr att) { + portable_attribute_ = std::move(att); + } + + // Returns a mutable attribute that should be filled by derived encoders with + // the transformed version of the attribute data. To get a public const + // version, use the GetPortableAttribute() method. + PointAttribute *portable_attribute() { return portable_attribute_.get(); } + + private: + PointCloudEncoder *encoder_; + const PointAttribute *attribute_; + int attribute_id_; + + // List of attribute encoders that need to be encoded before this attribute. + // E.g. The parent attributes may be used to predict values used by this + // attribute encoder. + std::vector parent_attributes_; + + bool is_parent_encoder_; + + // Attribute that stores transformed data from the source attribute after it + // is processed through the ApplyTransform() method. Attribute data stored + // within this attribute is guaranteed to be encoded losslessly and it can be + // safely used for prediction of other attributes. + std::unique_ptr portable_attribute_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_ATTRIBUTE_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoders_controller.cc b/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoders_controller.cc new file mode 100644 index 000000000..7d5d1eeff --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoders_controller.cc @@ -0,0 +1,159 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
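The encoder interface above deliberately splits the work into a (possibly lossy) transform to a "portable" integer format and a lossless encoding of that format. For float attributes the portable transform is essentially uniform quantization; the standalone sketch below shows the idea only, since Draco's actual quantization transform is implemented elsewhere and handles details such as range computation and clamping differently.

#include <cmath>
#include <cstdint>
#include <vector>

// Uniform quantization of float values into integers: the kind of lossy
// "portable format" transform a sequential encoder applies before the
// lossless encoding step. Dequantization is the decoder-side inverse.
std::vector<int32_t> Quantize(const std::vector<float> &values,
                              float min_value, float range,
                              int quantization_bits) {
  const float scale = static_cast<float>((1u << quantization_bits) - 1) / range;
  std::vector<int32_t> quantized(values.size());
  for (size_t i = 0; i < values.size(); ++i) {
    quantized[i] =
        static_cast<int32_t>(std::floor((values[i] - min_value) * scale + 0.5f));
  }
  return quantized;
}

std::vector<float> Dequantize(const std::vector<int32_t> &quantized,
                              float min_value, float range,
                              int quantization_bits) {
  const float scale = range / static_cast<float>((1u << quantization_bits) - 1);
  std::vector<float> values(quantized.size());
  for (size_t i = 0; i < quantized.size(); ++i) {
    values[i] = min_value + quantized[i] * scale;
  }
  return values;
}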
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/attributes/sequential_attribute_encoders_controller.h" +#ifdef DRACO_NORMAL_ENCODING_SUPPORTED +#include "draco/compression/attributes/sequential_normal_attribute_encoder.h" +#endif +#include "draco/compression/attributes/sequential_quantization_attribute_encoder.h" +#include "draco/compression/point_cloud/point_cloud_encoder.h" + +namespace draco { + +SequentialAttributeEncodersController::SequentialAttributeEncodersController( + std::unique_ptr sequencer) + : sequencer_(std::move(sequencer)) {} + +SequentialAttributeEncodersController::SequentialAttributeEncodersController( + std::unique_ptr sequencer, int point_attrib_id) + : AttributesEncoder(point_attrib_id), sequencer_(std::move(sequencer)) {} + +bool SequentialAttributeEncodersController::Init(PointCloudEncoder *encoder, + const PointCloud *pc) { + if (!AttributesEncoder::Init(encoder, pc)) { + return false; + } + if (!CreateSequentialEncoders()) { + return false; + } + // Initialize all value encoders. + for (uint32_t i = 0; i < num_attributes(); ++i) { + const int32_t att_id = GetAttributeId(i); + if (!sequential_encoders_[i]->Init(encoder, att_id)) { + return false; + } + } + return true; +} + +bool SequentialAttributeEncodersController::EncodeAttributesEncoderData( + EncoderBuffer *out_buffer) { + if (!AttributesEncoder::EncodeAttributesEncoderData(out_buffer)) { + return false; + } + // Encode a unique id of every sequential encoder. 
+ for (uint32_t i = 0; i < sequential_encoders_.size(); ++i) { + out_buffer->Encode(sequential_encoders_[i]->GetUniqueId()); + } + return true; +} + +bool SequentialAttributeEncodersController::EncodeAttributes( + EncoderBuffer *buffer) { + if (!sequencer_ || !sequencer_->GenerateSequence(&point_ids_)) { + return false; + } + return AttributesEncoder::EncodeAttributes(buffer); +} + +bool SequentialAttributeEncodersController:: + TransformAttributesToPortableFormat() { + for (uint32_t i = 0; i < sequential_encoders_.size(); ++i) { + if (!sequential_encoders_[i]->TransformAttributeToPortableFormat( + point_ids_)) { + return false; + } + } + return true; +} + +bool SequentialAttributeEncodersController::EncodePortableAttributes( + EncoderBuffer *out_buffer) { + for (uint32_t i = 0; i < sequential_encoders_.size(); ++i) { + if (!sequential_encoders_[i]->EncodePortableAttribute(point_ids_, + out_buffer)) { + return false; + } + } + return true; +} + +bool SequentialAttributeEncodersController:: + EncodeDataNeededByPortableTransforms(EncoderBuffer *out_buffer) { + for (uint32_t i = 0; i < sequential_encoders_.size(); ++i) { + if (!sequential_encoders_[i]->EncodeDataNeededByPortableTransform( + out_buffer)) { + return false; + } + } + return true; +} + +bool SequentialAttributeEncodersController::CreateSequentialEncoders() { + sequential_encoders_.resize(num_attributes()); + for (uint32_t i = 0; i < num_attributes(); ++i) { + sequential_encoders_[i] = CreateSequentialEncoder(i); + if (sequential_encoders_[i] == nullptr) { + return false; + } + if (i < sequential_encoder_marked_as_parent_.size()) { + if (sequential_encoder_marked_as_parent_[i]) { + sequential_encoders_[i]->MarkParentAttribute(); + } + } + } + return true; +} + +std::unique_ptr +SequentialAttributeEncodersController::CreateSequentialEncoder(int i) { + const int32_t att_id = GetAttributeId(i); + const PointAttribute *const att = encoder()->point_cloud()->attribute(att_id); + + switch (att->data_type()) { + case DT_UINT8: + case DT_INT8: + case DT_UINT16: + case DT_INT16: + case DT_UINT32: + case DT_INT32: + return std::unique_ptr( + new SequentialIntegerAttributeEncoder()); + case DT_FLOAT32: + if (encoder()->options()->GetAttributeInt(att_id, "quantization_bits", + -1) > 0) { +#ifdef DRACO_NORMAL_ENCODING_SUPPORTED + if (att->attribute_type() == GeometryAttribute::NORMAL) { + // We currently only support normals with float coordinates + // and must be quantized. + return std::unique_ptr( + new SequentialNormalAttributeEncoder()); + } else { +#endif + return std::unique_ptr( + new SequentialQuantizationAttributeEncoder()); +#ifdef DRACO_NORMAL_ENCODING_SUPPORTED + } +#endif + } + break; + default: + break; + } + // Return the default attribute encoder. + return std::unique_ptr( + new SequentialAttributeEncoder()); +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoders_controller.h b/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoders_controller.h new file mode 100644 index 000000000..13c2704ec --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoders_controller.h @@ -0,0 +1,115 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
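Which branch of CreateSequentialEncoder() above is taken for float data depends on the per-attribute "quantization_bits" option. The usual way to set it is through the public draco::Encoder wrapper; the call below is believed to match encode.h but is written from memory, so treat it as an assumption to verify rather than something this patch defines.

#include "draco/attributes/geometry_attribute.h"
#include "draco/compression/encode.h"

// Request 14-bit position and 10-bit normal quantization. Float attributes
// with quantization enabled are routed to the quantization (or, for normals,
// the normal) sequential encoder selected in the switch above.
void ConfigureQuantization(draco::Encoder *encoder) {
  encoder->SetAttributeQuantization(draco::GeometryAttribute::POSITION, 14);
  encoder->SetAttributeQuantization(draco::GeometryAttribute::NORMAL, 10);
}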
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_ATTRIBUTE_ENCODERS_CONTROLLER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_ATTRIBUTE_ENCODERS_CONTROLLER_H_ + +#include "draco/compression/attributes/attributes_encoder.h" +#include "draco/compression/attributes/points_sequencer.h" +#include "draco/compression/attributes/sequential_attribute_encoder.h" + +namespace draco { + +// A basic implementation of an attribute encoder that can be used to encode +// an arbitrary set of attributes. The encoder creates a sequential attribute +// encoder for each encoded attribute (see sequential_attribute_encoder.h) and +// then it encodes all attribute values in an order defined by a point sequence +// generated in the GeneratePointSequence() method. The default implementation +// generates a linear sequence of all points, but derived classes can generate +// any custom sequence. +class SequentialAttributeEncodersController : public AttributesEncoder { + public: + explicit SequentialAttributeEncodersController( + std::unique_ptr sequencer); + SequentialAttributeEncodersController( + std::unique_ptr sequencer, int point_attrib_id); + + bool Init(PointCloudEncoder *encoder, const PointCloud *pc) override; + bool EncodeAttributesEncoderData(EncoderBuffer *out_buffer) override; + bool EncodeAttributes(EncoderBuffer *buffer) override; + uint8_t GetUniqueId() const override { return BASIC_ATTRIBUTE_ENCODER; } + + int NumParentAttributes(int32_t point_attribute_id) const override { + const int32_t loc_id = GetLocalIdForPointAttribute(point_attribute_id); + if (loc_id < 0) { + return 0; + } + return sequential_encoders_[loc_id]->NumParentAttributes(); + } + + int GetParentAttributeId(int32_t point_attribute_id, + int32_t parent_i) const override { + const int32_t loc_id = GetLocalIdForPointAttribute(point_attribute_id); + if (loc_id < 0) { + return -1; + } + return sequential_encoders_[loc_id]->GetParentAttributeId(parent_i); + } + + bool MarkParentAttribute(int32_t point_attribute_id) override { + const int32_t loc_id = GetLocalIdForPointAttribute(point_attribute_id); + if (loc_id < 0) { + return false; + } + // Mark the attribute encoder as parent (even when if it is not created + // yet). + if (sequential_encoder_marked_as_parent_.size() <= loc_id) { + sequential_encoder_marked_as_parent_.resize(loc_id + 1, false); + } + sequential_encoder_marked_as_parent_[loc_id] = true; + + if (sequential_encoders_.size() <= loc_id) { + return true; // Sequential encoders not generated yet. 
+ } + sequential_encoders_[loc_id]->MarkParentAttribute(); + return true; + } + + const PointAttribute *GetPortableAttribute( + int32_t point_attribute_id) override { + const int32_t loc_id = GetLocalIdForPointAttribute(point_attribute_id); + if (loc_id < 0) { + return nullptr; + } + return sequential_encoders_[loc_id]->GetPortableAttribute(); + } + + protected: + bool TransformAttributesToPortableFormat() override; + bool EncodePortableAttributes(EncoderBuffer *out_buffer) override; + bool EncodeDataNeededByPortableTransforms(EncoderBuffer *out_buffer) override; + + // Creates all sequential encoders (one for each attribute associated with the + // encoder). + virtual bool CreateSequentialEncoders(); + + // Create a sequential encoder for a given attribute based on the attribute + // type + // and the provided encoder options. + virtual std::unique_ptr CreateSequentialEncoder( + int i); + + private: + std::vector> sequential_encoders_; + + // Flag for each sequential attribute encoder indicating whether it was marked + // as parent attribute or not. + std::vector sequential_encoder_marked_as_parent_; + std::vector point_ids_; + std::unique_ptr sequencer_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_ATTRIBUTE_ENCODERS_CONTROLLER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_decoder.cc b/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_decoder.cc new file mode 100644 index 000000000..83f42125a --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_decoder.cc @@ -0,0 +1,240 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/attributes/sequential_integer_attribute_decoder.h" + +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_decoder_factory.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_decoding_transform.h" +#include "draco/compression/entropy/symbol_decoding.h" + +namespace draco { + +SequentialIntegerAttributeDecoder::SequentialIntegerAttributeDecoder() {} + +bool SequentialIntegerAttributeDecoder::Init(PointCloudDecoder *decoder, + int attribute_id) { + if (!SequentialAttributeDecoder::Init(decoder, attribute_id)) { + return false; + } + return true; +} + +bool SequentialIntegerAttributeDecoder::TransformAttributeToOriginalFormat( + const std::vector &point_ids) { +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED + if (decoder() && + decoder()->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 0)) { + return true; // Don't revert the transform here for older files. + } +#endif + return StoreValues(static_cast(point_ids.size())); +} + +bool SequentialIntegerAttributeDecoder::DecodeValues( + const std::vector &point_ids, DecoderBuffer *in_buffer) { + // Decode prediction scheme. 
+ int8_t prediction_scheme_method; + if (!in_buffer->Decode(&prediction_scheme_method)) { + return false; + } + if (prediction_scheme_method != PREDICTION_NONE) { + int8_t prediction_transform_type; + if (!in_buffer->Decode(&prediction_transform_type)) { + return false; + } + // Check that decoded prediction scheme transform type is valid. + if (prediction_transform_type < PREDICTION_TRANSFORM_NONE || + prediction_transform_type >= NUM_PREDICTION_SCHEME_TRANSFORM_TYPES) { + return false; + } + prediction_scheme_ = CreateIntPredictionScheme( + static_cast(prediction_scheme_method), + static_cast(prediction_transform_type)); + } + + if (prediction_scheme_) { + if (!InitPredictionScheme(prediction_scheme_.get())) { + return false; + } + } + + if (!DecodeIntegerValues(point_ids, in_buffer)) { + return false; + } + +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED + const int32_t num_values = static_cast(point_ids.size()); + if (decoder() && + decoder()->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 0)) { + // For older files, revert the transform right after we decode the data. + if (!StoreValues(num_values)) { + return false; + } + } +#endif + return true; +} + +std::unique_ptr> +SequentialIntegerAttributeDecoder::CreateIntPredictionScheme( + PredictionSchemeMethod method, + PredictionSchemeTransformType transform_type) { + if (transform_type != PREDICTION_TRANSFORM_WRAP) { + return nullptr; // For now we support only wrap transform. + } + return CreatePredictionSchemeForDecoder< + int32_t, PredictionSchemeWrapDecodingTransform>( + method, attribute_id(), decoder()); +} + +bool SequentialIntegerAttributeDecoder::DecodeIntegerValues( + const std::vector &point_ids, DecoderBuffer *in_buffer) { + const int num_components = GetNumValueComponents(); + if (num_components <= 0) { + return false; + } + const size_t num_entries = point_ids.size(); + const size_t num_values = num_entries * num_components; + PreparePortableAttribute(static_cast(num_entries), num_components); + int32_t *const portable_attribute_data = GetPortableAttributeData(); + if (portable_attribute_data == nullptr) { + return false; + } + uint8_t compressed; + if (!in_buffer->Decode(&compressed)) { + return false; + } + if (compressed > 0) { + // Decode compressed values. + if (!DecodeSymbols(static_cast(num_values), num_components, + in_buffer, + reinterpret_cast(portable_attribute_data))) { + return false; + } + } else { + // Decode the integer data directly. + // Get the number of bytes for a given entry. + uint8_t num_bytes; + if (!in_buffer->Decode(&num_bytes)) { + return false; + } + if (num_bytes == DataTypeLength(DT_INT32)) { + if (portable_attribute()->buffer()->data_size() < + sizeof(int32_t) * num_values) { + return false; + } + if (!in_buffer->Decode(portable_attribute_data, + sizeof(int32_t) * num_values)) { + return false; + } + } else { + if (portable_attribute()->buffer()->data_size() < + num_bytes * num_values) { + return false; + } + if (in_buffer->remaining_size() < + static_cast(num_bytes) * static_cast(num_values)) { + return false; + } + for (size_t i = 0; i < num_values; ++i) { + if (!in_buffer->Decode(portable_attribute_data + i, num_bytes)) + return false; + } + } + } + + if (num_values > 0 && (prediction_scheme_ == nullptr || + !prediction_scheme_->AreCorrectionsPositive())) { + // Convert the values back to the original signed format. 
+ ConvertSymbolsToSignedInts( + reinterpret_cast(portable_attribute_data), + static_cast(num_values), portable_attribute_data); + } + + // If the data was encoded with a prediction scheme, we must revert it. + if (prediction_scheme_) { + if (!prediction_scheme_->DecodePredictionData(in_buffer)) { + return false; + } + + if (num_values > 0) { + if (!prediction_scheme_->ComputeOriginalValues( + portable_attribute_data, portable_attribute_data, + static_cast(num_values), num_components, point_ids.data())) { + return false; + } + } + } + return true; +} + +bool SequentialIntegerAttributeDecoder::StoreValues(uint32_t num_values) { + switch (attribute()->data_type()) { + case DT_UINT8: + StoreTypedValues(num_values); + break; + case DT_INT8: + StoreTypedValues(num_values); + break; + case DT_UINT16: + StoreTypedValues(num_values); + break; + case DT_INT16: + StoreTypedValues(num_values); + break; + case DT_UINT32: + StoreTypedValues(num_values); + break; + case DT_INT32: + StoreTypedValues(num_values); + break; + default: + return false; + } + return true; +} + +template +void SequentialIntegerAttributeDecoder::StoreTypedValues(uint32_t num_values) { + const int num_components = attribute()->num_components(); + const int entry_size = sizeof(AttributeTypeT) * num_components; + const std::unique_ptr att_val( + new AttributeTypeT[num_components]); + const int32_t *const portable_attribute_data = GetPortableAttributeData(); + int val_id = 0; + int out_byte_pos = 0; + for (uint32_t i = 0; i < num_values; ++i) { + for (int c = 0; c < num_components; ++c) { + const AttributeTypeT value = + static_cast(portable_attribute_data[val_id++]); + att_val[c] = value; + } + // Store the integer value into the attribute buffer. + attribute()->buffer()->Write(out_byte_pos, att_val.get(), entry_size); + out_byte_pos += entry_size; + } +} + +void SequentialIntegerAttributeDecoder::PreparePortableAttribute( + int num_entries, int num_components) { + GeometryAttribute va; + va.Init(attribute()->attribute_type(), nullptr, num_components, DT_INT32, + false, num_components * DataTypeLength(DT_INT32), 0); + std::unique_ptr port_att(new PointAttribute(va)); + port_att->SetIdentityMapping(); + port_att->Reset(num_entries); + SetPortableAttribute(std::move(port_att)); +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_decoder.h b/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_decoder.h new file mode 100644 index 000000000..ef48ed817 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_decoder.h @@ -0,0 +1,76 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
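StoreTypedValues() above copies the int32 staging ("portable") data back into the attribute's declared integer type, one entry of num_components values at a time. A stand-alone sketch of the same idea with plain vectors in place of Draco's attribute buffers; StoreTyped is a hypothetical helper:

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Cast the int32 staging values back to the attribute's original type.
template <typename AttributeTypeT>
std::vector<AttributeTypeT> StoreTyped(const std::vector<int32_t> &staging,
                                       int num_components) {
  assert(staging.size() % num_components == 0);
  std::vector<AttributeTypeT> out(staging.size());
  for (size_t i = 0; i < staging.size(); ++i) {
    // The narrowing cast is intentional: the encoder only produced values
    // that fit into the original attribute type.
    out[i] = static_cast<AttributeTypeT>(staging[i]);
  }
  return out;
}

// Usage, mirroring the DT_INT16 case of StoreValues() above:
// const auto restored = StoreTyped<int16_t>({1, -2, 3, 4, -5, 6}, 3);
```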
+// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_INTEGER_ATTRIBUTE_DECODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_INTEGER_ATTRIBUTE_DECODER_H_ + +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_decoder.h" +#include "draco/compression/attributes/sequential_attribute_decoder.h" +#include "draco/draco_features.h" + +namespace draco { + +// Decoder for attributes encoded with the SequentialIntegerAttributeEncoder. +class SequentialIntegerAttributeDecoder : public SequentialAttributeDecoder { + public: + SequentialIntegerAttributeDecoder(); + bool Init(PointCloudDecoder *decoder, int attribute_id) override; + + bool TransformAttributeToOriginalFormat( + const std::vector &point_ids) override; + + protected: + bool DecodeValues(const std::vector &point_ids, + DecoderBuffer *in_buffer) override; + virtual bool DecodeIntegerValues(const std::vector &point_ids, + DecoderBuffer *in_buffer); + + // Returns a prediction scheme that should be used for decoding of the + // integer values. + virtual std::unique_ptr> + CreateIntPredictionScheme(PredictionSchemeMethod method, + PredictionSchemeTransformType transform_type); + + // Returns the number of integer attribute components. In general, this + // can be different from the number of components of the input attribute. + virtual int32_t GetNumValueComponents() const { + return attribute()->num_components(); + } + + // Called after all integer values are decoded. The implementation should + // use this method to store the values into the attribute. + virtual bool StoreValues(uint32_t num_values); + + void PreparePortableAttribute(int num_entries, int num_components); + + int32_t *GetPortableAttributeData() { + if (portable_attribute()->size() == 0) { + return nullptr; + } + return reinterpret_cast( + portable_attribute()->GetAddress(AttributeValueIndex(0))); + } + + private: + // Stores decoded values into the attribute with a data type AttributeTypeT. + template + void StoreTypedValues(uint32_t num_values); + + std::unique_ptr> + prediction_scheme_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_INTEGER_ATTRIBUTE_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoder.cc b/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoder.cc new file mode 100644 index 000000000..e66a0a8a4 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoder.cc @@ -0,0 +1,233 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
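Both this decoder and the encoder that follows instantiate the "wrap" prediction transform: corrections are taken modulo the value range the encoder observed, and the decoder wraps prediction plus correction back into that range so intermediate results never overflow 32 bits. A hedged sketch of the decoder-side step; the real transforms in the prediction_scheme_wrap_*_transform.h headers carry the range in their transform data and apply it per component:

```cpp
#include <cstdint>

// Wrap a reconstructed value back into the [min_value, max_value] range that
// the encoder observed (sketch only).
int32_t WrapIntoRange(int64_t value, int32_t min_value, int32_t max_value) {
  const int64_t range = static_cast<int64_t>(max_value) - min_value + 1;
  while (value > max_value) value -= range;
  while (value < min_value) value += range;
  return static_cast<int32_t>(value);
}

// Decoder-side reconstruction for one component:
//   original = WrapIntoRange(prediction + correction, min_value, max_value);
```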
+// +#include "draco/compression/attributes/sequential_integer_attribute_encoder.h" + +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_encoding_transform.h" +#include "draco/compression/entropy/symbol_encoding.h" +#include "draco/core/bit_utils.h" + +namespace draco { + +SequentialIntegerAttributeEncoder::SequentialIntegerAttributeEncoder() {} + +bool SequentialIntegerAttributeEncoder::Init(PointCloudEncoder *encoder, + int attribute_id) { + if (!SequentialAttributeEncoder::Init(encoder, attribute_id)) { + return false; + } + if (GetUniqueId() == SEQUENTIAL_ATTRIBUTE_ENCODER_INTEGER) { + // When encoding integers, this encoder currently works only for integer + // attributes up to 32 bits. + switch (attribute()->data_type()) { + case DT_INT8: + case DT_UINT8: + case DT_INT16: + case DT_UINT16: + case DT_INT32: + case DT_UINT32: + break; + default: + return false; + } + } + // Init prediction scheme. + const PredictionSchemeMethod prediction_scheme_method = + GetPredictionMethodFromOptions(attribute_id, *encoder->options()); + + prediction_scheme_ = CreateIntPredictionScheme(prediction_scheme_method); + + if (prediction_scheme_ && !InitPredictionScheme(prediction_scheme_.get())) { + prediction_scheme_ = nullptr; + } + + return true; +} + +bool SequentialIntegerAttributeEncoder::TransformAttributeToPortableFormat( + const std::vector &point_ids) { + if (encoder()) { + if (!PrepareValues(point_ids, encoder()->point_cloud()->num_points())) { + return false; + } + } else { + if (!PrepareValues(point_ids, 0)) { + return false; + } + } + + // Update point to attribute mapping with the portable attribute if the + // attribute is a parent attribute (for now, we can skip it otherwise). + if (is_parent_encoder()) { + // First create map between original attribute value indices and new ones + // (determined by the encoding order). + const PointAttribute *const orig_att = attribute(); + PointAttribute *const portable_att = portable_attribute(); + IndexTypeVector + value_to_value_map(orig_att->size()); + for (int i = 0; i < point_ids.size(); ++i) { + value_to_value_map[orig_att->mapped_index(point_ids[i])] = + AttributeValueIndex(i); + } + if (portable_att->is_mapping_identity()) { + portable_att->SetExplicitMapping(encoder()->point_cloud()->num_points()); + } + // Go over all points of the original attribute and update the mapping in + // the portable attribute. + for (PointIndex i(0); i < encoder()->point_cloud()->num_points(); ++i) { + portable_att->SetPointMapEntry( + i, value_to_value_map[orig_att->mapped_index(i)]); + } + } + return true; +} + +std::unique_ptr> +SequentialIntegerAttributeEncoder::CreateIntPredictionScheme( + PredictionSchemeMethod method) { + return CreatePredictionSchemeForEncoder< + int32_t, PredictionSchemeWrapEncodingTransform>( + method, attribute_id(), encoder()); +} + +bool SequentialIntegerAttributeEncoder::EncodeValues( + const std::vector &point_ids, EncoderBuffer *out_buffer) { + // Initialize general quantization data. 
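EncodeValues() below picks one of two storage modes: entropy-coded symbols (the default) or raw values shrunk to the smallest byte width that covers the largest value. A sketch of that width computation; the production code uses MostSignificantBit() from draco/core/bit_utils.h, a portable loop is used here instead:

```cpp
#include <cstdint>
#include <vector>

// Bytes needed per value, for values already folded to non-negative symbols.
int BytesPerValue(const std::vector<uint32_t> &values) {
  uint32_t mask = 0;
  for (const uint32_t v : values) {
    mask |= v;  // OR everything so only the overall maximum bit matters.
  }
  int msb = 0;  // Most significant set bit of the OR-ed mask (0 if mask == 0).
  while (mask >>= 1) {
    ++msb;
  }
  return 1 + msb / 8;
}

// Example: {5, 200, 70000} needs 3 bytes per value (MSB of the OR is bit 16).
```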
+ const PointAttribute *const attrib = attribute(); + if (attrib->size() == 0) { + return true; + } + + int8_t prediction_scheme_method = PREDICTION_NONE; + if (prediction_scheme_) { + if (!SetPredictionSchemeParentAttributes(prediction_scheme_.get())) { + return false; + } + prediction_scheme_method = + static_cast(prediction_scheme_->GetPredictionMethod()); + } + out_buffer->Encode(prediction_scheme_method); + if (prediction_scheme_) { + out_buffer->Encode( + static_cast(prediction_scheme_->GetTransformType())); + } + + const int num_components = portable_attribute()->num_components(); + const int num_values = + static_cast(num_components * portable_attribute()->size()); + const int32_t *const portable_attribute_data = GetPortableAttributeData(); + + // We need to keep the portable data intact, but several encoding steps can + // result in changes of this data, e.g., by applying prediction schemes that + // change the data in place. To preserve the portable data we store and + // process all encoded data in a separate array. + std::vector encoded_data(num_values); + + // All integer values are initialized. Process them using the prediction + // scheme if we have one. + if (prediction_scheme_) { + prediction_scheme_->ComputeCorrectionValues( + portable_attribute_data, &encoded_data[0], num_values, num_components, + point_ids.data()); + } + + if (prediction_scheme_ == nullptr || + !prediction_scheme_->AreCorrectionsPositive()) { + const int32_t *const input = + prediction_scheme_ ? encoded_data.data() : portable_attribute_data; + ConvertSignedIntsToSymbols(input, num_values, + reinterpret_cast(&encoded_data[0])); + } + + if (encoder() == nullptr || encoder()->options()->GetGlobalBool( + "use_built_in_attribute_compression", true)) { + out_buffer->Encode(static_cast(1)); + Options symbol_encoding_options; + if (encoder() != nullptr) { + SetSymbolEncodingCompressionLevel(&symbol_encoding_options, + 10 - encoder()->options()->GetSpeed()); + } + if (!EncodeSymbols(reinterpret_cast(encoded_data.data()), + static_cast(point_ids.size()) * num_components, + num_components, &symbol_encoding_options, out_buffer)) { + return false; + } + } else { + // No compression. Just store the raw integer values, using the number of + // bytes as needed. + + // To compute the maximum bit-length, first OR all values. + uint32_t masked_value = 0; + for (uint32_t i = 0; i < static_cast(num_values); ++i) { + masked_value |= encoded_data[i]; + } + // Compute the msb of the ORed value. + int value_msb_pos = 0; + if (masked_value != 0) { + value_msb_pos = MostSignificantBit(masked_value); + } + const int num_bytes = 1 + value_msb_pos / 8; + + out_buffer->Encode(static_cast(0)); + out_buffer->Encode(static_cast(num_bytes)); + + if (num_bytes == DataTypeLength(DT_INT32)) { + out_buffer->Encode(encoded_data.data(), sizeof(int32_t) * num_values); + } else { + for (uint32_t i = 0; i < static_cast(num_values); ++i) { + out_buffer->Encode(encoded_data.data() + i, num_bytes); + } + } + } + if (prediction_scheme_) { + prediction_scheme_->EncodePredictionData(out_buffer); + } + return true; +} + +bool SequentialIntegerAttributeEncoder::PrepareValues( + const std::vector &point_ids, int num_points) { + // Convert all values to int32_t format. 
+ const PointAttribute *const attrib = attribute(); + const int num_components = attrib->num_components(); + const int num_entries = static_cast(point_ids.size()); + PreparePortableAttribute(num_entries, num_components, num_points); + int32_t dst_index = 0; + int32_t *const portable_attribute_data = GetPortableAttributeData(); + for (PointIndex pi : point_ids) { + const AttributeValueIndex att_id = attrib->mapped_index(pi); + if (!attrib->ConvertValue(att_id, + portable_attribute_data + dst_index)) { + return false; + } + dst_index += num_components; + } + return true; +} + +void SequentialIntegerAttributeEncoder::PreparePortableAttribute( + int num_entries, int num_components, int num_points) { + GeometryAttribute va; + va.Init(attribute()->attribute_type(), nullptr, num_components, DT_INT32, + false, num_components * DataTypeLength(DT_INT32), 0); + std::unique_ptr port_att(new PointAttribute(va)); + port_att->Reset(num_entries); + SetPortableAttribute(std::move(port_att)); + if (num_points) { + portable_attribute()->SetExplicitMapping(num_points); + } +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoder.h b/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoder.h new file mode 100644 index 000000000..c1d6222ef --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoder.h @@ -0,0 +1,67 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_INTEGER_ATTRIBUTE_ENCODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_INTEGER_ATTRIBUTE_ENCODER_H_ + +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_encoder.h" +#include "draco/compression/attributes/sequential_attribute_encoder.h" + +namespace draco { + +// Attribute encoder designed for lossless encoding of integer attributes. The +// attribute values can be pre-processed by a prediction scheme and compressed +// with a built-in entropy coder. +class SequentialIntegerAttributeEncoder : public SequentialAttributeEncoder { + public: + SequentialIntegerAttributeEncoder(); + uint8_t GetUniqueId() const override { + return SEQUENTIAL_ATTRIBUTE_ENCODER_INTEGER; + } + + bool Init(PointCloudEncoder *encoder, int attribute_id) override; + bool TransformAttributeToPortableFormat( + const std::vector &point_ids) override; + + protected: + bool EncodeValues(const std::vector &point_ids, + EncoderBuffer *out_buffer) override; + + // Returns a prediction scheme that should be used for encoding of the + // integer values. + virtual std::unique_ptr> + CreateIntPredictionScheme(PredictionSchemeMethod method); + + // Prepares the integer values that are going to be encoded. 
+ virtual bool PrepareValues(const std::vector &point_ids, + int num_points); + + void PreparePortableAttribute(int num_entries, int num_components, + int num_points); + + int32_t *GetPortableAttributeData() { + return reinterpret_cast( + portable_attribute()->GetAddress(AttributeValueIndex(0))); + } + + private: + // Optional prediction scheme can be used to modify the integer values in + // order to make them easier to compress. + std::unique_ptr> + prediction_scheme_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_INTEGER_ATTRIBUTE_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoding_test.cc b/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoding_test.cc new file mode 100644 index 000000000..44485e679 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoding_test.cc @@ -0,0 +1,64 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include + +#include "draco/compression/attributes/sequential_integer_attribute_decoder.h" +#include "draco/compression/attributes/sequential_integer_attribute_encoder.h" +#include "draco/compression/config/compression_shared.h" +#include "draco/core/draco_test_base.h" + +namespace draco { + +class SequentialIntegerAttributeEncodingTest : public ::testing::Test { + protected: +}; + +TEST_F(SequentialIntegerAttributeEncodingTest, DoesCompress) { + // This test verifies that IntegerEncoding encodes and decodes the given data. + const std::vector values{1, 8, 7, 5, 5, 5, 9, + 155, -6, -9, 9, 125, 1, 0}; + PointAttribute pa; + pa.Init(GeometryAttribute::GENERIC, 1, DT_INT32, false, values.size()); + for (uint32_t i = 0; i < values.size(); ++i) { + pa.SetAttributeValue(AttributeValueIndex(i), &values[i]); + } + // List of point ids from 0 to point_ids.size() - 1. 
+ std::vector point_ids(values.size()); + std::iota(point_ids.begin(), point_ids.end(), 0); + + EncoderBuffer out_buf; + SequentialIntegerAttributeEncoder ie; + ASSERT_TRUE(ie.InitializeStandalone(&pa)); + ASSERT_TRUE(ie.TransformAttributeToPortableFormat(point_ids)); + ASSERT_TRUE(ie.EncodePortableAttribute(point_ids, &out_buf)); + ASSERT_TRUE(ie.EncodeDataNeededByPortableTransform(&out_buf)); + + DecoderBuffer in_buf; + in_buf.Init(out_buf.data(), out_buf.size()); + in_buf.set_bitstream_version(kDracoMeshBitstreamVersion); + SequentialIntegerAttributeDecoder id; + ASSERT_TRUE(id.InitializeStandalone(&pa)); + ASSERT_TRUE(id.DecodePortableAttribute(point_ids, &in_buf)); + ASSERT_TRUE(id.DecodeDataNeededByPortableTransform(point_ids, &in_buf)); + ASSERT_TRUE(id.TransformAttributeToOriginalFormat(point_ids)); + + for (uint32_t i = 0; i < values.size(); ++i) { + int32_t entry_val; + pa.GetValue(AttributeValueIndex(i), &entry_val); + ASSERT_EQ(entry_val, values[i]); + } +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_decoder.cc b/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_decoder.cc new file mode 100644 index 000000000..de36c1c36 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_decoder.cc @@ -0,0 +1,76 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/attributes/sequential_normal_attribute_decoder.h" + +#include "draco/compression/attributes/normal_compression_utils.h" + +namespace draco { + +SequentialNormalAttributeDecoder::SequentialNormalAttributeDecoder() {} + +bool SequentialNormalAttributeDecoder::Init(PointCloudDecoder *decoder, + int attribute_id) { + if (!SequentialIntegerAttributeDecoder::Init(decoder, attribute_id)) { + return false; + } + // Currently, this encoder works only for 3-component normal vectors. + if (attribute()->num_components() != 3) { + return false; + } + // Also the data type must be DT_FLOAT32. + if (attribute()->data_type() != DT_FLOAT32) { + return false; + } + return true; +} + +bool SequentialNormalAttributeDecoder::DecodeIntegerValues( + const std::vector &point_ids, DecoderBuffer *in_buffer) { +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED + if (decoder()->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 0)) { + // Note: in older bitstreams, we do not have a PortableAttribute() decoded + // at this stage so we cannot pass it down to the DecodeParameters() call. + // It still works fine for octahedral transform because it does not need to + // use any data from the attribute. 
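The octahedral transform referenced here projects a 3-component unit normal onto the octahedron |x|+|y|+|z| = 1 and unfolds its lower half, so the normal is described by two coordinates in [-1, 1] that are then snapped to a (1 << quantization_bits) - 1 grid. A self-contained sketch of that mapping; the production version in normal_compression_utils.h differs in details such as canonicalization and rounding:

```cpp
#include <array>
#include <cmath>
#include <cstdint>

// Project a unit vector onto the octahedron and unfold it into [-1, 1]^2.
std::array<float, 2> OctahedralCoords(float x, float y, float z) {
  const float abs_sum = std::fabs(x) + std::fabs(y) + std::fabs(z);
  x /= abs_sum;
  y /= abs_sum;
  if (z < 0.f) {
    // Unfold the bottom half of the octahedron onto the outer corners.
    const float old_x = x;
    x = (1.f - std::fabs(y)) * (old_x >= 0.f ? 1.f : -1.f);
    y = (1.f - std::fabs(old_x)) * (y >= 0.f ? 1.f : -1.f);
  }
  return {x, y};
}

// Quantize one octahedral coordinate from [-1, 1] onto the integer grid.
int32_t QuantizeCoord(float s, int quantization_bits) {
  const int32_t max_value = (1 << quantization_bits) - 1;
  return static_cast<int32_t>(std::lround((s * 0.5f + 0.5f) * max_value));
}
```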
+ if (!octahedral_transform_.DecodeParameters(*attribute(), in_buffer)) { + return false; + } + } +#endif + return SequentialIntegerAttributeDecoder::DecodeIntegerValues(point_ids, + in_buffer); +} + +bool SequentialNormalAttributeDecoder::DecodeDataNeededByPortableTransform( + const std::vector &point_ids, DecoderBuffer *in_buffer) { + if (decoder()->bitstream_version() >= DRACO_BITSTREAM_VERSION(2, 0)) { + // For newer file version, decode attribute transform data here. + if (!octahedral_transform_.DecodeParameters(*GetPortableAttribute(), + in_buffer)) { + return false; + } + } + + // Store the decoded transform data in portable attribute. + return octahedral_transform_.TransferToAttribute(portable_attribute()); +} + +bool SequentialNormalAttributeDecoder::StoreValues(uint32_t num_points) { + // Convert all quantized values back to floats. + return octahedral_transform_.InverseTransformAttribute( + *GetPortableAttribute(), attribute()); +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_decoder.h b/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_decoder.h new file mode 100644 index 000000000..8c2d801b7 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_decoder.h @@ -0,0 +1,83 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_NORMAL_ATTRIBUTE_DECODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_NORMAL_ATTRIBUTE_DECODER_H_ + +#include "draco/attributes/attribute_octahedron_transform.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_decoder_factory.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_decoding_transform.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_decoding_transform.h" +#include "draco/compression/attributes/sequential_integer_attribute_decoder.h" +#include "draco/draco_features.h" + +namespace draco { + +// Decoder for attributes encoded with SequentialNormalAttributeEncoder. +class SequentialNormalAttributeDecoder + : public SequentialIntegerAttributeDecoder { + public: + SequentialNormalAttributeDecoder(); + bool Init(PointCloudDecoder *decoder, int attribute_id) override; + + protected: + int32_t GetNumValueComponents() const override { + return 2; // We quantize everything into two components. 
+ } + bool DecodeIntegerValues(const std::vector &point_ids, + DecoderBuffer *in_buffer) override; + bool DecodeDataNeededByPortableTransform( + const std::vector &point_ids, + DecoderBuffer *in_buffer) override; + bool StoreValues(uint32_t num_points) override; + + private: + AttributeOctahedronTransform octahedral_transform_; + + std::unique_ptr> + CreateIntPredictionScheme( + PredictionSchemeMethod method, + PredictionSchemeTransformType transform_type) override { + switch (transform_type) { +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED + case PREDICTION_TRANSFORM_NORMAL_OCTAHEDRON: { + typedef PredictionSchemeNormalOctahedronDecodingTransform + Transform; + // At this point the decoder has not read the quantization bits, + // which is why we must construct the transform by default. + // See Transform.DecodeTransformData for more details. + return CreatePredictionSchemeForDecoder( + method, attribute_id(), decoder()); + } +#endif + case PREDICTION_TRANSFORM_NORMAL_OCTAHEDRON_CANONICALIZED: { + typedef PredictionSchemeNormalOctahedronCanonicalizedDecodingTransform< + int32_t> + Transform; + // At this point the decoder has not read the quantization bits, + // which is why we must construct the transform by default. + // See Transform.DecodeTransformData for more details. + return CreatePredictionSchemeForDecoder( + method, attribute_id(), decoder()); + } + default: + return nullptr; // Currently, we support only octahedron transform and + // octahedron transform canonicalized. + } + } +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_NORMAL_ATTRIBUTE_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_encoder.cc b/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_encoder.cc new file mode 100644 index 000000000..2e20e89e6 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_encoder.cc @@ -0,0 +1,57 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/attributes/sequential_normal_attribute_encoder.h" + +#include "draco/compression/attributes/normal_compression_utils.h" + +namespace draco { + +bool SequentialNormalAttributeEncoder::Init(PointCloudEncoder *encoder, + int attribute_id) { + if (!SequentialIntegerAttributeEncoder::Init(encoder, attribute_id)) + return false; + // Currently this encoder works only for 3-component normal vectors. + if (attribute()->num_components() != 3) { + return false; + } + + // Initialize AttributeOctahedronTransform. 
+ const int quantization_bits = encoder->options()->GetAttributeInt( + attribute_id, "quantization_bits", -1); + if (quantization_bits < 1) { + return false; + } + attribute_octahedron_transform_.SetParameters(quantization_bits); + return true; +} + +bool SequentialNormalAttributeEncoder::EncodeDataNeededByPortableTransform( + EncoderBuffer *out_buffer) { + return attribute_octahedron_transform_.EncodeParameters(out_buffer); +} + +bool SequentialNormalAttributeEncoder::PrepareValues( + const std::vector &point_ids, int num_points) { + auto portable_att = attribute_octahedron_transform_.InitTransformedAttribute( + *(attribute()), point_ids.size()); + if (!attribute_octahedron_transform_.TransformAttribute( + *(attribute()), point_ids, portable_att.get())) { + return false; + } + SetPortableAttribute(std::move(portable_att)); + return true; +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_encoder.h b/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_encoder.h new file mode 100644 index 000000000..53705c598 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_encoder.h @@ -0,0 +1,82 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_NORMAL_ATTRIBUTE_ENCODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_NORMAL_ATTRIBUTE_ENCODER_H_ + +#include "draco/attributes/attribute_octahedron_transform.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_encoding_transform.h" +#include "draco/compression/attributes/sequential_integer_attribute_encoder.h" +#include "draco/compression/config/compression_shared.h" + +namespace draco { + +// Class for encoding normal vectors using an octahedral encoding, see Cigolle +// et al.'14 “A Survey of Efficient Representations for Independent Unit +// Vectors”. Compared to the basic quantization encoder, this encoder results +// in a better compression rate under the same accuracy settings. Note that this +// encoder doesn't preserve the lengths of input vectors, therefore it will not +// work correctly when the input values are not normalized. +class SequentialNormalAttributeEncoder + : public SequentialIntegerAttributeEncoder { + public: + uint8_t GetUniqueId() const override { + return SEQUENTIAL_ATTRIBUTE_ENCODER_NORMALS; + } + bool IsLossyEncoder() const override { return true; } + + bool EncodeDataNeededByPortableTransform(EncoderBuffer *out_buffer) override; + + protected: + bool Init(PointCloudEncoder *encoder, int attribute_id) override; + + // Put quantized values in portable attribute for sequential encoding. 
+ bool PrepareValues(const std::vector &point_ids, + int num_points) override; + + std::unique_ptr> + CreateIntPredictionScheme(PredictionSchemeMethod /* method */) override { + typedef PredictionSchemeNormalOctahedronCanonicalizedEncodingTransform< + int32_t> + Transform; + const int32_t quantization_bits = encoder()->options()->GetAttributeInt( + attribute_id(), "quantization_bits", -1); + const int32_t max_value = (1 << quantization_bits) - 1; + const Transform transform(max_value); + const PredictionSchemeMethod default_prediction_method = + SelectPredictionMethod(attribute_id(), encoder()); + const int32_t prediction_method = encoder()->options()->GetAttributeInt( + attribute_id(), "prediction_scheme", default_prediction_method); + + if (prediction_method == MESH_PREDICTION_GEOMETRIC_NORMAL) { + return CreatePredictionSchemeForEncoder( + MESH_PREDICTION_GEOMETRIC_NORMAL, attribute_id(), encoder(), + transform); + } + if (prediction_method == PREDICTION_DIFFERENCE) { + return CreatePredictionSchemeForEncoder( + PREDICTION_DIFFERENCE, attribute_id(), encoder(), transform); + } + DRACO_DCHECK(false); // Should never be reached. + return nullptr; + } + + // Used for the conversion to quantized normals in octahedral format. + AttributeOctahedronTransform attribute_octahedron_transform_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_NORMAL_ATTRIBUTE_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_decoder.cc b/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_decoder.cc new file mode 100644 index 000000000..3d306e7da --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_decoder.cc @@ -0,0 +1,88 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/attributes/sequential_quantization_attribute_decoder.h" + +#include "draco/core/quantization_utils.h" + +namespace draco { + +SequentialQuantizationAttributeDecoder:: + SequentialQuantizationAttributeDecoder() {} + +bool SequentialQuantizationAttributeDecoder::Init(PointCloudDecoder *decoder, + int attribute_id) { + if (!SequentialIntegerAttributeDecoder::Init(decoder, attribute_id)) { + return false; + } + const PointAttribute *const attribute = + decoder->point_cloud()->attribute(attribute_id); + // Currently we can quantize only floating point arguments. 
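The quantization coders are built around an AttributeQuantizationTransform that maps each float component onto an integer grid described by an origin, a range and a bit count. A hedged sketch of the forward mapping and the inverse used by DequantizeValues(); exact rounding and clamping in the real transform may differ:

```cpp
#include <cmath>
#include <cstdint>

// Quantize a float component onto [0, (1 << bits) - 1] relative to the
// attribute-wide origin/range; assumes range > 0 and value within range.
uint32_t Quantize(float value, float origin, float range, int bits) {
  const uint32_t max_q = (1u << bits) - 1;
  const float normalized = (value - origin) / range;  // expected in [0, 1]
  return static_cast<uint32_t>(std::lround(normalized * max_q));
}

// Inverse mapping: reconstruct an approximation of the original value.
float Dequantize(uint32_t q, float origin, float range, int bits) {
  const uint32_t max_q = (1u << bits) - 1;
  return origin + (static_cast<float>(q) / max_q) * range;
}
```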
+ if (attribute->data_type() != DT_FLOAT32) { + return false; + } + return true; +} + +bool SequentialQuantizationAttributeDecoder::DecodeIntegerValues( + const std::vector &point_ids, DecoderBuffer *in_buffer) { +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED + if (decoder()->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 0) && + !DecodeQuantizedDataInfo()) { + return false; + } +#endif + return SequentialIntegerAttributeDecoder::DecodeIntegerValues(point_ids, + in_buffer); +} + +bool SequentialQuantizationAttributeDecoder:: + DecodeDataNeededByPortableTransform( + const std::vector &point_ids, DecoderBuffer *in_buffer) { + if (decoder()->bitstream_version() >= DRACO_BITSTREAM_VERSION(2, 0)) { + // Decode quantization data here only for files with bitstream version 2.0+ + if (!DecodeQuantizedDataInfo()) { + return false; + } + } + + // Store the decoded transform data in portable attribute; + return quantization_transform_.TransferToAttribute(portable_attribute()); +} + +bool SequentialQuantizationAttributeDecoder::StoreValues(uint32_t num_points) { + return DequantizeValues(num_points); +} + +bool SequentialQuantizationAttributeDecoder::DecodeQuantizedDataInfo() { + // Get attribute used as source for decoding. + auto att = GetPortableAttribute(); + if (att == nullptr) { + // This should happen only in the backward compatibility mode. It will still + // work fine for this case because the only thing the quantization transform + // cares about is the number of components that is the same for both source + // and target attributes. + att = attribute(); + } + return quantization_transform_.DecodeParameters(*att, decoder()->buffer()); +} + +bool SequentialQuantizationAttributeDecoder::DequantizeValues( + uint32_t num_values) { + // Convert all quantized values back to floats. + return quantization_transform_.InverseTransformAttribute( + *GetPortableAttribute(), attribute()); +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_decoder.h b/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_decoder.h new file mode 100644 index 000000000..ad372dcd8 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_decoder.h @@ -0,0 +1,52 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_QUANTIZATION_ATTRIBUTE_DECODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_QUANTIZATION_ATTRIBUTE_DECODER_H_ + +#include "draco/attributes/attribute_quantization_transform.h" +#include "draco/compression/attributes/sequential_integer_attribute_decoder.h" +#include "draco/draco_features.h" + +namespace draco { + +// Decoder for attribute values encoded with the +// SequentialQuantizationAttributeEncoder. 
+class SequentialQuantizationAttributeDecoder + : public SequentialIntegerAttributeDecoder { + public: + SequentialQuantizationAttributeDecoder(); + bool Init(PointCloudDecoder *decoder, int attribute_id) override; + + protected: + bool DecodeIntegerValues(const std::vector &point_ids, + DecoderBuffer *in_buffer) override; + bool DecodeDataNeededByPortableTransform( + const std::vector &point_ids, + DecoderBuffer *in_buffer) override; + bool StoreValues(uint32_t num_points) override; + + // Decodes data necessary for dequantizing the encoded values. + virtual bool DecodeQuantizedDataInfo(); + + // Dequantizes all values and stores them into the output attribute. + virtual bool DequantizeValues(uint32_t num_values); + + private: + AttributeQuantizationTransform quantization_transform_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_QUANTIZATION_ATTRIBUTE_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_encoder.cc b/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_encoder.cc new file mode 100644 index 000000000..d3666f7a4 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_encoder.cc @@ -0,0 +1,86 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/attributes/sequential_quantization_attribute_encoder.h" + +#include "draco/core/quantization_utils.h" + +namespace draco { + +SequentialQuantizationAttributeEncoder:: + SequentialQuantizationAttributeEncoder() {} + +bool SequentialQuantizationAttributeEncoder::Init(PointCloudEncoder *encoder, + int attribute_id) { + if (!SequentialIntegerAttributeEncoder::Init(encoder, attribute_id)) { + return false; + } + // This encoder currently works only for floating point attributes. + const PointAttribute *const attribute = + encoder->point_cloud()->attribute(attribute_id); + if (attribute->data_type() != DT_FLOAT32) { + return false; + } + + // Initialize AttributeQuantizationTransform. + const int quantization_bits = encoder->options()->GetAttributeInt( + attribute_id, "quantization_bits", -1); + if (quantization_bits < 1) { + return false; + } + if (encoder->options()->IsAttributeOptionSet(attribute_id, + "quantization_origin") && + encoder->options()->IsAttributeOptionSet(attribute_id, + "quantization_range")) { + // Quantization settings are explicitly specified in the provided options. 
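The branch below reads explicitly supplied quantization settings; otherwise ComputeParameters() derives them from the data, conceptually taking the component-wise minimum as the origin and the largest extent over all components as a single shared range. A sketch of that derivation with a hypothetical helper, not the Draco API; it assumes the input holds at least one full entry:

```cpp
#include <algorithm>
#include <vector>

struct QuantizationParams {
  std::vector<float> origin;  // component-wise minimum
  float range = 0.f;          // largest extent over all components
};

// |values| holds num_components floats per entry, tightly packed.
QuantizationParams ComputeQuantizationParams(const std::vector<float> &values,
                                             int num_components) {
  QuantizationParams params;
  params.origin.assign(values.begin(), values.begin() + num_components);
  std::vector<float> max_vals(params.origin);
  for (size_t i = 0; i < values.size(); ++i) {
    const int c = static_cast<int>(i % num_components);
    params.origin[c] = std::min(params.origin[c], values[i]);
    max_vals[c] = std::max(max_vals[c], values[i]);
  }
  for (int c = 0; c < num_components; ++c) {
    params.range = std::max(params.range, max_vals[c] - params.origin[c]);
  }
  return params;
}
```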
+ std::vector quantization_origin(attribute->num_components()); + encoder->options()->GetAttributeVector(attribute_id, "quantization_origin", + attribute->num_components(), + &quantization_origin[0]); + const float range = encoder->options()->GetAttributeFloat( + attribute_id, "quantization_range", 1.f); + if (!attribute_quantization_transform_.SetParameters( + quantization_bits, quantization_origin.data(), + attribute->num_components(), range)) { + return false; + } + } else { + // Compute quantization settings from the attribute values. + if (!attribute_quantization_transform_.ComputeParameters( + *attribute, quantization_bits)) { + return false; + } + } + return true; +} + +bool SequentialQuantizationAttributeEncoder:: + EncodeDataNeededByPortableTransform(EncoderBuffer *out_buffer) { + return attribute_quantization_transform_.EncodeParameters(out_buffer); +} + +bool SequentialQuantizationAttributeEncoder::PrepareValues( + const std::vector &point_ids, int num_points) { + auto portable_attribute = + attribute_quantization_transform_.InitTransformedAttribute( + *attribute(), point_ids.size()); + if (!attribute_quantization_transform_.TransformAttribute( + *(attribute()), point_ids, portable_attribute.get())) { + return false; + } + SetPortableAttribute(std::move(portable_attribute)); + return true; +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_encoder.h b/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_encoder.h new file mode 100644 index 000000000..e9762bdd6 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_encoder.h @@ -0,0 +1,52 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_QUANTIZATION_ATTRIBUTE_ENCODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_QUANTIZATION_ATTRIBUTE_ENCODER_H_ + +#include "draco/attributes/attribute_quantization_transform.h" +#include "draco/compression/attributes/sequential_integer_attribute_encoder.h" + +namespace draco { + +class MeshEncoder; + +// Attribute encoder that quantizes floating point attribute values. The +// quantized values can be optionally compressed using an entropy coding. +class SequentialQuantizationAttributeEncoder + : public SequentialIntegerAttributeEncoder { + public: + SequentialQuantizationAttributeEncoder(); + uint8_t GetUniqueId() const override { + return SEQUENTIAL_ATTRIBUTE_ENCODER_QUANTIZATION; + } + bool Init(PointCloudEncoder *encoder, int attribute_id) override; + + bool IsLossyEncoder() const override { return true; } + + bool EncodeDataNeededByPortableTransform(EncoderBuffer *out_buffer) override; + + protected: + // Put quantized values in portable attribute for sequential encoding. + bool PrepareValues(const std::vector &point_ids, + int num_points) override; + + private: + // Used for the quantization. 
+  AttributeQuantizationTransform attribute_quantization_transform_;
+};
+
+}  // namespace draco
+
+#endif  // DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_QUANTIZATION_ATTRIBUTE_ENCODER_H_
diff --git a/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_coding_shared.h b/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_coding_shared.h
new file mode 100644
index 000000000..faacbd5b9
--- /dev/null
+++ b/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_coding_shared.h
@@ -0,0 +1,43 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// File provides shared functions for adaptive rANS bit coding.
+#ifndef DRACO_COMPRESSION_BIT_CODERS_ADAPTIVE_RANS_BIT_CODING_SHARED_H_
+#define DRACO_COMPRESSION_BIT_CODERS_ADAPTIVE_RANS_BIT_CODING_SHARED_H_
+
+#include "draco/core/macros.h"
+
+namespace draco {
+
+// Clamp the probability p to a uint8_t in the range [1,255].
+inline uint8_t clamp_probability(double p) {
+  DRACO_DCHECK_LE(p, 1.0);
+  DRACO_DCHECK_LE(0.0, p);
+  uint32_t p_int = static_cast<uint32_t>((p * 256) + 0.5);
+  p_int -= (p_int == 256);
+  p_int += (p_int == 0);
+  return static_cast<uint8_t>(p_int);
+}
+
+// Update the probability according to new incoming bit.
+inline double update_probability(double old_p, bool bit) {
+  static constexpr double w = 128.0;
+  static constexpr double w0 = (w - 1.0) / w;
+  static constexpr double w1 = 1.0 / w;
+  return old_p * w0 + (!bit) * w1;
+}
+
+}  // namespace draco
+
+#endif  // DRACO_COMPRESSION_BIT_CODERS_ADAPTIVE_RANS_BIT_CODING_SHARED_H_
diff --git a/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_decoder.cc b/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_decoder.cc
new file mode 100644
index 000000000..056842c4a
--- /dev/null
+++ b/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_decoder.cc
@@ -0,0 +1,70 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
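The adaptive rANS coders that follow share one small probability model: the zero-probability starts at 0.5, is nudged toward each observed bit with weight 1/128, and is clamped to a byte before being handed to the rANS primitives. A stand-alone sketch of how the estimate adapts on a biased bit stream, restating the two helpers above so the snippet compiles on its own:

```cpp
#include <cstdint>
#include <cstdio>

// Same shape as the helpers in adaptive_rans_bit_coding_shared.h.
uint8_t ClampProbability(double p) {
  uint32_t p_int = static_cast<uint32_t>(p * 256 + 0.5);
  p_int -= (p_int == 256);
  p_int += (p_int == 0);
  return static_cast<uint8_t>(p_int);
}

double UpdateProbability(double old_p, bool bit) {
  constexpr double w = 128.0;
  return old_p * ((w - 1.0) / w) + (!bit) * (1.0 / w);
}

int main() {
  double p0 = 0.5;  // estimated probability of a zero bit
  for (int i = 0; i < 200; ++i) {
    const bool bit = (i % 4 == 0);  // stream is roughly 75% zeros
    p0 = UpdateProbability(p0, bit);
  }
  std::printf("adapted p0 ~ %.2f, clamped byte = %u\n", p0,
              static_cast<unsigned>(ClampProbability(p0)));
  return 0;
}
```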
+// +#include "draco/compression/bit_coders/adaptive_rans_bit_decoder.h" + +#include "draco/compression/bit_coders/adaptive_rans_bit_coding_shared.h" + +namespace draco { + +AdaptiveRAnsBitDecoder::AdaptiveRAnsBitDecoder() : p0_f_(0.5) {} + +AdaptiveRAnsBitDecoder::~AdaptiveRAnsBitDecoder() { Clear(); } + +bool AdaptiveRAnsBitDecoder::StartDecoding(DecoderBuffer *source_buffer) { + Clear(); + + uint32_t size_in_bytes; + if (!source_buffer->Decode(&size_in_bytes)) { + return false; + } + if (size_in_bytes > source_buffer->remaining_size()) { + return false; + } + if (ans_read_init(&ans_decoder_, + reinterpret_cast( + const_cast(source_buffer->data_head())), + size_in_bytes) != 0) { + return false; + } + source_buffer->Advance(size_in_bytes); + return true; +} + +bool AdaptiveRAnsBitDecoder::DecodeNextBit() { + const uint8_t p0 = clamp_probability(p0_f_); + const bool bit = static_cast(rabs_read(&ans_decoder_, p0)); + p0_f_ = update_probability(p0_f_, bit); + return bit; +} + +void AdaptiveRAnsBitDecoder::DecodeLeastSignificantBits32(int nbits, + uint32_t *value) { + DRACO_DCHECK_EQ(true, nbits <= 32); + DRACO_DCHECK_EQ(true, nbits > 0); + + uint32_t result = 0; + while (nbits) { + result = (result << 1) + DecodeNextBit(); + --nbits; + } + *value = result; +} + +void AdaptiveRAnsBitDecoder::Clear() { + ans_read_end(&ans_decoder_); + p0_f_ = 0.5; +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_decoder.h b/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_decoder.h new file mode 100644 index 000000000..a1ea011dd --- /dev/null +++ b/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_decoder.h @@ -0,0 +1,54 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// File provides basic classes and functions for rANS bit decoding. +#ifndef DRACO_COMPRESSION_BIT_CODERS_ADAPTIVE_RANS_BIT_DECODER_H_ +#define DRACO_COMPRESSION_BIT_CODERS_ADAPTIVE_RANS_BIT_DECODER_H_ + +#include + +#include "draco/compression/entropy/ans.h" +#include "draco/core/decoder_buffer.h" + +namespace draco { + +// Class for decoding a sequence of bits that were encoded with +// AdaptiveRAnsBitEncoder. +class AdaptiveRAnsBitDecoder { + public: + AdaptiveRAnsBitDecoder(); + ~AdaptiveRAnsBitDecoder(); + + // Sets |source_buffer| as the buffer to decode bits from. + bool StartDecoding(DecoderBuffer *source_buffer); + + // Decode one bit. Returns true if the bit is a 1, otherwise false. + bool DecodeNextBit(); + + // Decode the next |nbits| and return the sequence in |value|. |nbits| must be + // > 0 and <= 32. 
+ void DecodeLeastSignificantBits32(int nbits, uint32_t *value); + + void EndDecoding() {} + + private: + void Clear(); + + AnsDecoder ans_decoder_; + double p0_f_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_BIT_CODERS_ADAPTIVE_RANS_BIT_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_encoder.cc b/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_encoder.cc new file mode 100644 index 000000000..5ce9dc388 --- /dev/null +++ b/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_encoder.cc @@ -0,0 +1,59 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/bit_coders/adaptive_rans_bit_encoder.h" + +#include "draco/compression/bit_coders/adaptive_rans_bit_coding_shared.h" + +namespace draco { + +AdaptiveRAnsBitEncoder::AdaptiveRAnsBitEncoder() {} + +AdaptiveRAnsBitEncoder::~AdaptiveRAnsBitEncoder() { Clear(); } + +void AdaptiveRAnsBitEncoder::StartEncoding() { Clear(); } + +void AdaptiveRAnsBitEncoder::EndEncoding(EncoderBuffer *target_buffer) { + // Buffer for ans to write. + std::vector buffer(bits_.size() + 16); + AnsCoder ans_coder; + ans_write_init(&ans_coder, buffer.data()); + + // Unfortunately we have to encode the bits in reversed order, while the + // probabilities that should be given are those of the forward sequence. + double p0_f = 0.5; + std::vector p0s; + p0s.reserve(bits_.size()); + for (bool b : bits_) { + p0s.push_back(clamp_probability(p0_f)); + p0_f = update_probability(p0_f, b); + } + auto bit = bits_.rbegin(); + auto pit = p0s.rbegin(); + while (bit != bits_.rend()) { + rabs_write(&ans_coder, *bit, *pit); + ++bit; + ++pit; + } + + const uint32_t size_in_bytes = ans_write_end(&ans_coder); + target_buffer->Encode(size_in_bytes); + target_buffer->Encode(buffer.data(), size_in_bytes); + + Clear(); +} + +void AdaptiveRAnsBitEncoder::Clear() { bits_.clear(); } + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_encoder.h b/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_encoder.h new file mode 100644 index 000000000..9b1832844 --- /dev/null +++ b/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_encoder.h @@ -0,0 +1,61 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// File provides basic classes and functions for rANS bit encoding. 
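Because rANS decodes in the reverse order of encoding, the encoder below cannot stream bits as they arrive: EndEncoding() first replays the recorded bits forward to fix the probability used at each position, then hands the (bit, probability) pairs to the rANS writer back to front. A minimal sketch of that two-pass ordering with a stand-in sink in place of rabs_write():

```cpp
#include <cstdint>
#include <functional>
#include <vector>

// Two-pass flush: the forward pass drives the adaptive model exactly as the
// decoder will, the reverse pass feeds the rANS writer (stubbed out here).
void FlushAdaptiveBits(const std::vector<bool> &bits,
                       const std::function<double(double, bool)> &update,
                       const std::function<void(bool, uint8_t)> &rans_write) {
  double p0 = 0.5;
  std::vector<uint8_t> probs;
  probs.reserve(bits.size());
  for (const bool b : bits) {
    // Crude stand-in for clamp_probability(); record p0 before updating.
    probs.push_back(static_cast<uint8_t>(p0 * 255.0 + 0.5));
    p0 = update(p0, b);
  }
  for (size_t i = bits.size(); i > 0; --i) {
    rans_write(bits[i - 1], probs[i - 1]);  // rANS writes back to front
  }
}
```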
+#ifndef DRACO_COMPRESSION_BIT_CODERS_ADAPTIVE_RANS_BIT_ENCODER_H_ +#define DRACO_COMPRESSION_BIT_CODERS_ADAPTIVE_RANS_BIT_ENCODER_H_ + +#include + +#include "draco/compression/entropy/ans.h" +#include "draco/core/encoder_buffer.h" + +namespace draco { + +// Class for adaptive encoding a sequence of bits using rANS. +class AdaptiveRAnsBitEncoder { + public: + AdaptiveRAnsBitEncoder(); + ~AdaptiveRAnsBitEncoder(); + + // Must be called before any Encode* function is called. + void StartEncoding(); + + // Encode one bit. If |bit| is true encode a 1, otherwise encode a 0. + void EncodeBit(bool bit) { bits_.push_back(bit); } + + // Encode |nbits| of |value|, starting from the least significant bit. + // |nbits| must be > 0 and <= 32. + void EncodeLeastSignificantBits32(int nbits, uint32_t value) { + DRACO_DCHECK_EQ(true, nbits <= 32); + DRACO_DCHECK_EQ(true, nbits > 0); + uint32_t selector = (1 << (nbits - 1)); + while (selector) { + EncodeBit(value & selector); + selector = selector >> 1; + } + } + + // Ends the bit encoding and stores the result into the target_buffer. + void EndEncoding(EncoderBuffer *target_buffer); + + private: + void Clear(); + + std::vector bits_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_BIT_CODERS_ADAPTIVE_RANS_BIT_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/bit_coders/direct_bit_decoder.cc b/contrib/draco/src/draco/compression/bit_coders/direct_bit_decoder.cc new file mode 100644 index 000000000..2abe3382a --- /dev/null +++ b/contrib/draco/src/draco/compression/bit_coders/direct_bit_decoder.cc @@ -0,0 +1,54 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/bit_coders/direct_bit_decoder.h" + +namespace draco { + +DirectBitDecoder::DirectBitDecoder() : pos_(bits_.end()), num_used_bits_(0) {} + +DirectBitDecoder::~DirectBitDecoder() { Clear(); } + +bool DirectBitDecoder::StartDecoding(DecoderBuffer *source_buffer) { + Clear(); + uint32_t size_in_bytes; + if (!source_buffer->Decode(&size_in_bytes)) { + return false; + } + + // Check that size_in_bytes is > 0 and a multiple of 4 as the encoder always + // encodes 32 bit elements. 
+ if (size_in_bytes == 0 || size_in_bytes & 0x3) { + return false; + } + if (size_in_bytes > source_buffer->remaining_size()) { + return false; + } + const uint32_t num_32bit_elements = size_in_bytes / 4; + bits_.resize(num_32bit_elements); + if (!source_buffer->Decode(bits_.data(), size_in_bytes)) { + return false; + } + pos_ = bits_.begin(); + num_used_bits_ = 0; + return true; +} + +void DirectBitDecoder::Clear() { + bits_.clear(); + num_used_bits_ = 0; + pos_ = bits_.end(); +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/bit_coders/direct_bit_decoder.h b/contrib/draco/src/draco/compression/bit_coders/direct_bit_decoder.h new file mode 100644 index 000000000..b9fbc2d6f --- /dev/null +++ b/contrib/draco/src/draco/compression/bit_coders/direct_bit_decoder.h @@ -0,0 +1,90 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// File provides direct encoding of bits with arithmetic encoder interface. +#ifndef DRACO_COMPRESSION_BIT_CODERS_DIRECT_BIT_DECODER_H_ +#define DRACO_COMPRESSION_BIT_CODERS_DIRECT_BIT_DECODER_H_ + +#include + +#include "draco/core/decoder_buffer.h" + +namespace draco { + +class DirectBitDecoder { + public: + DirectBitDecoder(); + ~DirectBitDecoder(); + + // Sets |source_buffer| as the buffer to decode bits from. + bool StartDecoding(DecoderBuffer *source_buffer); + + // Decode one bit. Returns true if the bit is a 1, otherwise false. + bool DecodeNextBit() { + const uint32_t selector = 1 << (31 - num_used_bits_); + if (pos_ == bits_.end()) { + return false; + } + const bool bit = *pos_ & selector; + ++num_used_bits_; + if (num_used_bits_ == 32) { + ++pos_; + num_used_bits_ = 0; + } + return bit; + } + + // Decode the next |nbits| and return the sequence in |value|. |nbits| must be + // > 0 and <= 32. 
+  void DecodeLeastSignificantBits32(int nbits, uint32_t *value) {
+    DRACO_DCHECK_EQ(true, nbits <= 32);
+    DRACO_DCHECK_EQ(true, nbits > 0);
+    const int remaining = 32 - num_used_bits_;
+    if (nbits <= remaining) {
+      if (pos_ == bits_.end()) {
+        *value = 0;
+        return;
+      }
+      *value = (*pos_ << num_used_bits_) >> (32 - nbits);
+      num_used_bits_ += nbits;
+      if (num_used_bits_ == 32) {
+        ++pos_;
+        num_used_bits_ = 0;
+      }
+    } else {
+      if (pos_ + 1 == bits_.end()) {
+        *value = 0;
+        return;
+      }
+      const uint32_t value_l = ((*pos_) << num_used_bits_);
+      num_used_bits_ = nbits - remaining;
+      ++pos_;
+      const uint32_t value_r = (*pos_) >> (32 - num_used_bits_);
+      *value = (value_l >> (32 - num_used_bits_ - remaining)) | value_r;
+    }
+  }
+
+  void EndDecoding() {}
+
+ private:
+  void Clear();
+
+  std::vector<uint32_t> bits_;
+  std::vector<uint32_t>::const_iterator pos_;
+  uint32_t num_used_bits_;
+};
+
+}  // namespace draco
+
+#endif  // DRACO_COMPRESSION_BIT_CODERS_DIRECT_BIT_DECODER_H_
diff --git a/contrib/draco/src/draco/compression/bit_coders/direct_bit_encoder.cc b/contrib/draco/src/draco/compression/bit_coders/direct_bit_encoder.cc
new file mode 100644
index 000000000..d39143cf5
--- /dev/null
+++ b/contrib/draco/src/draco/compression/bit_coders/direct_bit_encoder.cc
@@ -0,0 +1,39 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/bit_coders/direct_bit_encoder.h"
+
+namespace draco {
+
+DirectBitEncoder::DirectBitEncoder() : local_bits_(0), num_local_bits_(0) {}
+
+DirectBitEncoder::~DirectBitEncoder() { Clear(); }
+
+void DirectBitEncoder::StartEncoding() { Clear(); }
+
+void DirectBitEncoder::EndEncoding(EncoderBuffer *target_buffer) {
+  bits_.push_back(local_bits_);
+  const uint32_t size_in_byte = static_cast<uint32_t>(bits_.size()) * 4;
+  target_buffer->Encode(size_in_byte);
+  target_buffer->Encode(bits_.data(), size_in_byte);
+  Clear();
+}
+
+void DirectBitEncoder::Clear() {
+  bits_.clear();
+  local_bits_ = 0;
+  num_local_bits_ = 0;
+}
+
+}  // namespace draco
diff --git a/contrib/draco/src/draco/compression/bit_coders/direct_bit_encoder.h b/contrib/draco/src/draco/compression/bit_coders/direct_bit_encoder.h
new file mode 100644
index 000000000..705b2ca93
--- /dev/null
+++ b/contrib/draco/src/draco/compression/bit_coders/direct_bit_encoder.h
@@ -0,0 +1,89 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// File provides direct encoding of bits with arithmetic encoder interface.
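For readers skimming the patch, the following is a minimal, hypothetical round trip through the DirectBitEncoder/DirectBitDecoder pair added in this group of files. It is illustrative only and not part of the patch; it assumes the headers above are on the include path and that draco::EncoderBuffer exposes data() and size() as in draco/core/encoder_buffer.h.

// Sketch only: encode two small values, then decode them back.
#include <cstdint>

#include "draco/compression/bit_coders/direct_bit_decoder.h"
#include "draco/compression/bit_coders/direct_bit_encoder.h"

bool DirectBitRoundTripExample() {
  draco::DirectBitEncoder encoder;
  encoder.StartEncoding();
  encoder.EncodeLeastSignificantBits32(5, 19);    // Store 19 in 5 bits.
  encoder.EncodeLeastSignificantBits32(12, 4000); // Store 4000 in 12 bits.
  draco::EncoderBuffer encoder_buffer;
  encoder.EndEncoding(&encoder_buffer);  // Writes byte size + packed words.

  draco::DecoderBuffer decoder_buffer;
  decoder_buffer.Init(encoder_buffer.data(), encoder_buffer.size());
  draco::DirectBitDecoder decoder;
  if (!decoder.StartDecoding(&decoder_buffer)) {
    return false;
  }
  uint32_t a = 0, b = 0;
  decoder.DecodeLeastSignificantBits32(5, &a);   // a == 19
  decoder.DecodeLeastSignificantBits32(12, &b);  // b == 4000
  decoder.EndDecoding();
  return a == 19 && b == 4000;
}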
+#ifndef DRACO_COMPRESSION_BIT_CODERS_DIRECT_BIT_ENCODER_H_ +#define DRACO_COMPRESSION_BIT_CODERS_DIRECT_BIT_ENCODER_H_ + +#include + +#include "draco/core/encoder_buffer.h" + +namespace draco { + +class DirectBitEncoder { + public: + DirectBitEncoder(); + ~DirectBitEncoder(); + + // Must be called before any Encode* function is called. + void StartEncoding(); + + // Encode one bit. If |bit| is true encode a 1, otherwise encode a 0. + void EncodeBit(bool bit) { + if (bit) { + local_bits_ |= 1 << (31 - num_local_bits_); + } + num_local_bits_++; + if (num_local_bits_ == 32) { + bits_.push_back(local_bits_); + num_local_bits_ = 0; + local_bits_ = 0; + } + } + + // Encode |nbits| of |value|, starting from the least significant bit. + // |nbits| must be > 0 and <= 32. + void EncodeLeastSignificantBits32(int nbits, uint32_t value) { + DRACO_DCHECK_EQ(true, nbits <= 32); + DRACO_DCHECK_EQ(true, nbits > 0); + + const int remaining = 32 - num_local_bits_; + + // Make sure there are no leading bits that should not be encoded and + // start from here. + value = value << (32 - nbits); + if (nbits <= remaining) { + value = value >> num_local_bits_; + local_bits_ = local_bits_ | value; + num_local_bits_ += nbits; + if (num_local_bits_ == 32) { + bits_.push_back(local_bits_); + local_bits_ = 0; + num_local_bits_ = 0; + } + } else { + value = value >> (32 - nbits); + num_local_bits_ = nbits - remaining; + const uint32_t value_l = value >> num_local_bits_; + local_bits_ = local_bits_ | value_l; + bits_.push_back(local_bits_); + local_bits_ = value << (32 - num_local_bits_); + } + } + + // Ends the bit encoding and stores the result into the target_buffer. + void EndEncoding(EncoderBuffer *target_buffer); + + private: + void Clear(); + + std::vector bits_; + uint32_t local_bits_; + uint32_t num_local_bits_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_BIT_CODERS_DIRECT_BIT_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/bit_coders/folded_integer_bit_decoder.h b/contrib/draco/src/draco/compression/bit_coders/folded_integer_bit_decoder.h new file mode 100644 index 000000000..c14058b65 --- /dev/null +++ b/contrib/draco/src/draco/compression/bit_coders/folded_integer_bit_decoder.h @@ -0,0 +1,77 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// File provides direct encoding of bits with arithmetic encoder interface. +#ifndef DRACO_COMPRESSION_BIT_CODERS_FOLDED_INTEGER_BIT_DECODER_H_ +#define DRACO_COMPRESSION_BIT_CODERS_FOLDED_INTEGER_BIT_DECODER_H_ + +#include + +#include "draco/core/decoder_buffer.h" + +namespace draco { + +// See FoldedBit32Encoder for more details. +template +class FoldedBit32Decoder { + public: + FoldedBit32Decoder() {} + ~FoldedBit32Decoder() {} + + // Sets |source_buffer| as the buffer to decode bits from. 
+ bool StartDecoding(DecoderBuffer *source_buffer) { + for (int i = 0; i < 32; i++) { + if (!folded_number_decoders_[i].StartDecoding(source_buffer)) { + return false; + } + } + return bit_decoder_.StartDecoding(source_buffer); + } + + // Decode one bit. Returns true if the bit is a 1, otherwise false. + bool DecodeNextBit() { return bit_decoder_.DecodeNextBit(); } + + // Decode the next |nbits| and return the sequence in |value|. |nbits| must be + // > 0 and <= 32. + void DecodeLeastSignificantBits32(int nbits, uint32_t *value) { + uint32_t result = 0; + for (int i = 0; i < nbits; ++i) { + const bool bit = folded_number_decoders_[i].DecodeNextBit(); + result = (result << 1) + bit; + } + *value = result; + } + + void EndDecoding() { + for (int i = 0; i < 32; i++) { + folded_number_decoders_[i].EndDecoding(); + } + bit_decoder_.EndDecoding(); + } + + private: + void Clear() { + for (int i = 0; i < 32; i++) { + folded_number_decoders_[i].Clear(); + } + bit_decoder_.Clear(); + } + + std::array folded_number_decoders_; + BitDecoderT bit_decoder_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_BIT_CODERS_FOLDED_INTEGER_BIT_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/bit_coders/folded_integer_bit_encoder.h b/contrib/draco/src/draco/compression/bit_coders/folded_integer_bit_encoder.h new file mode 100644 index 000000000..375b38a61 --- /dev/null +++ b/contrib/draco/src/draco/compression/bit_coders/folded_integer_bit_encoder.h @@ -0,0 +1,82 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// File provides direct encoding of bits with arithmetic encoder interface. +#ifndef DRACO_COMPRESSION_BIT_CODERS_FOLDED_INTEGER_BIT_ENCODER_H_ +#define DRACO_COMPRESSION_BIT_CODERS_FOLDED_INTEGER_BIT_ENCODER_H_ + +#include + +#include "draco/core/encoder_buffer.h" + +namespace draco { + +// This coding scheme considers every bit of an (up to) 32bit integer as a +// separate context. This can be a significant advantage when encoding numbers +// where it is more likely that the front bits are zero. +// The behavior is essentially the same as other arithmetic encoding schemes, +// the only difference is that encoding and decoding of bits must be absolutely +// symmetric, bits handed in by EncodeBit32 must be also decoded in this way. +// This is the FoldedBit32Encoder, see also FoldedBit32Decoder. +template +class FoldedBit32Encoder { + public: + FoldedBit32Encoder() {} + ~FoldedBit32Encoder() {} + + // Must be called before any Encode* function is called. + void StartEncoding() { + for (int i = 0; i < 32; i++) { + folded_number_encoders_[i].StartEncoding(); + } + bit_encoder_.StartEncoding(); + } + + // Encode one bit. If |bit| is true encode a 1, otherwise encode a 0. + void EncodeBit(bool bit) { bit_encoder_.EncodeBit(bit); } + + // Encode |nbits| of |value|, starting from the least significant bit. + // |nbits| must be > 0 and <= 32. 
+ void EncodeLeastSignificantBits32(int nbits, uint32_t value) { + uint32_t selector = 1 << (nbits - 1); + for (int i = 0; i < nbits; i++) { + const bool bit = (value & selector); + folded_number_encoders_[i].EncodeBit(bit); + selector = selector >> 1; + } + } + + // Ends the bit encoding and stores the result into the target_buffer. + void EndEncoding(EncoderBuffer *target_buffer) { + for (int i = 0; i < 32; i++) { + folded_number_encoders_[i].EndEncoding(target_buffer); + } + bit_encoder_.EndEncoding(target_buffer); + } + + private: + void Clear() { + for (int i = 0; i < 32; i++) { + folded_number_encoders_[i].Clear(); + } + bit_encoder_.Clear(); + } + + std::array folded_number_encoders_; + BitEncoderT bit_encoder_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_BIT_CODERS_FOLDED_INTEGER_BIT_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/bit_coders/rans_bit_decoder.cc b/contrib/draco/src/draco/compression/bit_coders/rans_bit_decoder.cc new file mode 100644 index 000000000..a9b8fb9e9 --- /dev/null +++ b/contrib/draco/src/draco/compression/bit_coders/rans_bit_decoder.cc @@ -0,0 +1,82 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/bit_coders/rans_bit_decoder.h" + +#include "draco/compression/config/compression_shared.h" +#include "draco/core/bit_utils.h" +#include "draco/core/varint_decoding.h" + +namespace draco { + +RAnsBitDecoder::RAnsBitDecoder() : prob_zero_(0) {} + +RAnsBitDecoder::~RAnsBitDecoder() { Clear(); } + +bool RAnsBitDecoder::StartDecoding(DecoderBuffer *source_buffer) { + Clear(); + + if (!source_buffer->Decode(&prob_zero_)) { + return false; + } + + uint32_t size_in_bytes; +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED + if (source_buffer->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 2)) { + if (!source_buffer->Decode(&size_in_bytes)) { + return false; + } + + } else +#endif + { + if (!DecodeVarint(&size_in_bytes, source_buffer)) { + return false; + } + } + + if (size_in_bytes > source_buffer->remaining_size()) { + return false; + } + + if (ans_read_init(&ans_decoder_, + reinterpret_cast( + const_cast(source_buffer->data_head())), + size_in_bytes) != 0) { + return false; + } + source_buffer->Advance(size_in_bytes); + return true; +} + +bool RAnsBitDecoder::DecodeNextBit() { + const uint8_t bit = rabs_read(&ans_decoder_, prob_zero_); + return bit > 0; +} + +void RAnsBitDecoder::DecodeLeastSignificantBits32(int nbits, uint32_t *value) { + DRACO_DCHECK_EQ(true, nbits <= 32); + DRACO_DCHECK_EQ(true, nbits > 0); + + uint32_t result = 0; + while (nbits) { + result = (result << 1) + DecodeNextBit(); + --nbits; + } + *value = result; +} + +void RAnsBitDecoder::Clear() { ans_read_end(&ans_decoder_); } + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/bit_coders/rans_bit_decoder.h b/contrib/draco/src/draco/compression/bit_coders/rans_bit_decoder.h new file mode 100644 index 000000000..25d243eac --- /dev/null +++ 
b/contrib/draco/src/draco/compression/bit_coders/rans_bit_decoder.h @@ -0,0 +1,55 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// File provides basic classes and functions for rANS coding. +#ifndef DRACO_COMPRESSION_BIT_CODERS_RANS_BIT_DECODER_H_ +#define DRACO_COMPRESSION_BIT_CODERS_RANS_BIT_DECODER_H_ + +#include + +#include "draco/compression/entropy/ans.h" +#include "draco/core/decoder_buffer.h" +#include "draco/draco_features.h" + +namespace draco { + +// Class for decoding a sequence of bits that were encoded with RAnsBitEncoder. +class RAnsBitDecoder { + public: + RAnsBitDecoder(); + ~RAnsBitDecoder(); + + // Sets |source_buffer| as the buffer to decode bits from. + // Returns false when the data is invalid. + bool StartDecoding(DecoderBuffer *source_buffer); + + // Decode one bit. Returns true if the bit is a 1, otherwise false. + bool DecodeNextBit(); + + // Decode the next |nbits| and return the sequence in |value|. |nbits| must be + // > 0 and <= 32. + void DecodeLeastSignificantBits32(int nbits, uint32_t *value); + + void EndDecoding() {} + + private: + void Clear(); + + AnsDecoder ans_decoder_; + uint8_t prob_zero_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_BIT_CODERS_RANS_BIT_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/bit_coders/rans_bit_encoder.cc b/contrib/draco/src/draco/compression/bit_coders/rans_bit_encoder.cc new file mode 100644 index 000000000..8d00ea352 --- /dev/null +++ b/contrib/draco/src/draco/compression/bit_coders/rans_bit_encoder.cc @@ -0,0 +1,125 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +#include "draco/compression/bit_coders/rans_bit_encoder.h" + +#include "draco/compression/entropy/ans.h" +#include "draco/core/bit_utils.h" +#include "draco/core/varint_encoding.h" + +namespace draco { + +RAnsBitEncoder::RAnsBitEncoder() : local_bits_(0), num_local_bits_(0) {} + +RAnsBitEncoder::~RAnsBitEncoder() { Clear(); } + +void RAnsBitEncoder::StartEncoding() { Clear(); } + +void RAnsBitEncoder::EncodeBit(bool bit) { + if (bit) { + bit_counts_[1]++; + local_bits_ |= 1 << num_local_bits_; + } else { + bit_counts_[0]++; + } + num_local_bits_++; + + if (num_local_bits_ == 32) { + bits_.push_back(local_bits_); + num_local_bits_ = 0; + local_bits_ = 0; + } +} + +void RAnsBitEncoder::EncodeLeastSignificantBits32(int nbits, uint32_t value) { + DRACO_DCHECK_EQ(true, nbits <= 32); + DRACO_DCHECK_EQ(true, nbits > 0); + + const uint32_t reversed = ReverseBits32(value) >> (32 - nbits); + const int ones = CountOneBits32(reversed); + bit_counts_[0] += (nbits - ones); + bit_counts_[1] += ones; + + const int remaining = 32 - num_local_bits_; + + if (nbits <= remaining) { + CopyBits32(&local_bits_, num_local_bits_, reversed, 0, nbits); + num_local_bits_ += nbits; + if (num_local_bits_ == 32) { + bits_.push_back(local_bits_); + local_bits_ = 0; + num_local_bits_ = 0; + } + } else { + CopyBits32(&local_bits_, num_local_bits_, reversed, 0, remaining); + bits_.push_back(local_bits_); + local_bits_ = 0; + CopyBits32(&local_bits_, 0, reversed, remaining, nbits - remaining); + num_local_bits_ = nbits - remaining; + } +} + +void RAnsBitEncoder::EndEncoding(EncoderBuffer *target_buffer) { + uint64_t total = bit_counts_[1] + bit_counts_[0]; + if (total == 0) { + total++; + } + + // The probability interval [0,1] is mapped to values of [0, 256]. However, + // the coding scheme can not deal with probabilities of 0 or 1, which is why + // we must clamp the values to interval [1, 255]. Specifically 128 + // corresponds to 0.5 exactly. And the value can be given as uint8_t. + const uint32_t zero_prob_raw = static_cast( + ((bit_counts_[0] / static_cast(total)) * 256.0) + 0.5); + + uint8_t zero_prob = 255; + if (zero_prob_raw < 255) { + zero_prob = static_cast(zero_prob_raw); + } + + zero_prob += (zero_prob == 0); + + // Space for 32 bit integer and some extra space. + std::vector buffer((bits_.size() + 8) * 8); + AnsCoder ans_coder; + ans_write_init(&ans_coder, buffer.data()); + + for (int i = num_local_bits_ - 1; i >= 0; --i) { + const uint8_t bit = (local_bits_ >> i) & 1; + rabs_write(&ans_coder, bit, zero_prob); + } + for (auto it = bits_.rbegin(); it != bits_.rend(); ++it) { + const uint32_t bits = *it; + for (int i = 31; i >= 0; --i) { + const uint8_t bit = (bits >> i) & 1; + rabs_write(&ans_coder, bit, zero_prob); + } + } + + const int size_in_bytes = ans_write_end(&ans_coder); + target_buffer->Encode(zero_prob); + EncodeVarint(static_cast(size_in_bytes), target_buffer); + target_buffer->Encode(buffer.data(), size_in_bytes); + + Clear(); +} + +void RAnsBitEncoder::Clear() { + bit_counts_.assign(2, 0); + bits_.clear(); + local_bits_ = 0; + num_local_bits_ = 0; +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/bit_coders/rans_bit_encoder.h b/contrib/draco/src/draco/compression/bit_coders/rans_bit_encoder.h new file mode 100644 index 000000000..1993dd3d3 --- /dev/null +++ b/contrib/draco/src/draco/compression/bit_coders/rans_bit_encoder.h @@ -0,0 +1,57 @@ +// Copyright 2016 The Draco Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// File provides basic classes and functions for rANS coding. +#ifndef DRACO_COMPRESSION_BIT_CODERS_RANS_BIT_ENCODER_H_ +#define DRACO_COMPRESSION_BIT_CODERS_RANS_BIT_ENCODER_H_ + +#include + +#include "draco/core/encoder_buffer.h" + +namespace draco { + +// Class for encoding a sequence of bits using rANS. The probability table used +// to encode the bits is based off the total counts of bits. +// TODO(fgalligan): Investigate using an adaptive table for more compression. +class RAnsBitEncoder { + public: + RAnsBitEncoder(); + ~RAnsBitEncoder(); + + // Must be called before any Encode* function is called. + void StartEncoding(); + + // Encode one bit. If |bit| is true encode a 1, otherwise encode a 0. + void EncodeBit(bool bit); + + // Encode |nbits| of |value|, starting from the least significant bit. + // |nbits| must be > 0 and <= 32. + void EncodeLeastSignificantBits32(int nbits, uint32_t value); + + // Ends the bit encoding and stores the result into the target_buffer. + void EndEncoding(EncoderBuffer *target_buffer); + + private: + void Clear(); + + std::vector bit_counts_; + std::vector bits_; + uint32_t local_bits_; + uint32_t num_local_bits_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_BIT_CODERS_RANS_BIT_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/bit_coders/rans_coding_test.cc b/contrib/draco/src/draco/compression/bit_coders/rans_coding_test.cc new file mode 100644 index 000000000..9509ad9f3 --- /dev/null +++ b/contrib/draco/src/draco/compression/bit_coders/rans_coding_test.cc @@ -0,0 +1,9 @@ +#include "draco/compression/bit_coders/adaptive_rans_bit_decoder.h" +#include "draco/compression/bit_coders/adaptive_rans_bit_encoder.h" +#include "draco/compression/bit_coders/rans_bit_decoder.h" +#include "draco/compression/bit_coders/rans_bit_encoder.h" +#include "draco/core/draco_test_base.h" + +// Just including rans_coding.h and adaptive_rans_coding.h gets an asan error +// when compiling (blaze test :rans_coding_test --config=asan) +TEST(RansCodingTest, LinkerTest) {} diff --git a/contrib/draco/src/draco/compression/bit_coders/symbol_bit_decoder.cc b/contrib/draco/src/draco/compression/bit_coders/symbol_bit_decoder.cc new file mode 100644 index 000000000..8ed50ef92 --- /dev/null +++ b/contrib/draco/src/draco/compression/bit_coders/symbol_bit_decoder.cc @@ -0,0 +1,49 @@ +#include "draco/compression/bit_coders/symbol_bit_decoder.h" + +#include "draco/compression/entropy/symbol_decoding.h" + +namespace draco { + +bool SymbolBitDecoder::StartDecoding(DecoderBuffer *source_buffer) { + uint32_t size; + if (!source_buffer->Decode(&size)) { + return false; + } + + symbols_.resize(size); + if (!DecodeSymbols(size, 1, source_buffer, symbols_.data())) { + return false; + } + std::reverse(symbols_.begin(), symbols_.end()); + return true; +} + +bool SymbolBitDecoder::DecodeNextBit() { + uint32_t symbol; + DecodeLeastSignificantBits32(1, &symbol); + DRACO_DCHECK(symbol == 0 || symbol == 1); + return symbol 
== 1; +} + +void SymbolBitDecoder::DecodeLeastSignificantBits32(int nbits, + uint32_t *value) { + DRACO_DCHECK_LE(1, nbits); + DRACO_DCHECK_LE(nbits, 32); + DRACO_DCHECK_NE(value, nullptr); + // Testing: check to make sure there is something to decode. + DRACO_DCHECK_GT(symbols_.size(), 0); + + (*value) = symbols_.back(); + symbols_.pop_back(); + + const int discarded_bits = 32 - nbits; + (*value) <<= discarded_bits; + (*value) >>= discarded_bits; +} + +void SymbolBitDecoder::Clear() { + symbols_.clear(); + symbols_.shrink_to_fit(); +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/bit_coders/symbol_bit_decoder.h b/contrib/draco/src/draco/compression/bit_coders/symbol_bit_decoder.h new file mode 100644 index 000000000..909d7174f --- /dev/null +++ b/contrib/draco/src/draco/compression/bit_coders/symbol_bit_decoder.h @@ -0,0 +1,36 @@ +#ifndef DRACO_COMPRESSION_BIT_CODERS_SYMBOL_BIT_DECODER_H_ +#define DRACO_COMPRESSION_BIT_CODERS_SYMBOL_BIT_DECODER_H_ + +#include +#include + +#include "draco/core/decoder_buffer.h" + +namespace draco { + +// Class for decoding bits using the symbol entropy encoding. Wraps +// |DecodeSymbols|. Note that this uses a symbol-based encoding scheme for +// encoding bits. +class SymbolBitDecoder { + public: + // Sets |source_buffer| as the buffer to decode bits from. + bool StartDecoding(DecoderBuffer *source_buffer); + + // Decode one bit. Returns true if the bit is a 1, otherwise false. + bool DecodeNextBit(); + + // Decode the next |nbits| and return the sequence in |value|. |nbits| must be + // > 0 and <= 32. + void DecodeLeastSignificantBits32(int nbits, uint32_t *value); + + void EndDecoding() { Clear(); } + + private: + void Clear(); + + std::vector symbols_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_BIT_CODERS_SYMBOL_BIT_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/bit_coders/symbol_bit_encoder.cc b/contrib/draco/src/draco/compression/bit_coders/symbol_bit_encoder.cc new file mode 100644 index 000000000..83834236f --- /dev/null +++ b/contrib/draco/src/draco/compression/bit_coders/symbol_bit_encoder.cc @@ -0,0 +1,30 @@ +#include "draco/compression/bit_coders/symbol_bit_encoder.h" + +#include "draco/compression/entropy/symbol_encoding.h" + +namespace draco { + +void SymbolBitEncoder::EncodeLeastSignificantBits32(int nbits, uint32_t value) { + DRACO_DCHECK_LE(1, nbits); + DRACO_DCHECK_LE(nbits, 32); + + const int discarded_bits = 32 - nbits; + value <<= discarded_bits; + value >>= discarded_bits; + + symbols_.push_back(value); +} + +void SymbolBitEncoder::EndEncoding(EncoderBuffer *target_buffer) { + target_buffer->Encode(static_cast(symbols_.size())); + EncodeSymbols(symbols_.data(), static_cast(symbols_.size()), 1, nullptr, + target_buffer); + Clear(); +} + +void SymbolBitEncoder::Clear() { + symbols_.clear(); + symbols_.shrink_to_fit(); +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/bit_coders/symbol_bit_encoder.h b/contrib/draco/src/draco/compression/bit_coders/symbol_bit_encoder.h new file mode 100644 index 000000000..7f1570c1a --- /dev/null +++ b/contrib/draco/src/draco/compression/bit_coders/symbol_bit_encoder.h @@ -0,0 +1,36 @@ +#ifndef DRACO_COMPRESSION_BIT_CODERS_SYMBOL_BIT_ENCODER_H_ +#define DRACO_COMPRESSION_BIT_CODERS_SYMBOL_BIT_ENCODER_H_ + +#include +#include + +#include "draco/core/encoder_buffer.h" + +namespace draco { + +// Class for encoding bits using the symbol entropy encoding. Wraps +// |EncodeSymbols|. 
Note that this uses a symbol-based encoding scheme for +// encoding bits. +class SymbolBitEncoder { + public: + // Must be called before any Encode* function is called. + void StartEncoding() { Clear(); } + + // Encode one bit. If |bit| is true encode a 1, otherwise encode a 0. + void EncodeBit(bool bit) { EncodeLeastSignificantBits32(1, bit ? 1 : 0); } + + // Encode |nbits| LSBs of |value| as a symbol. |nbits| must be > 0 and <= 32. + void EncodeLeastSignificantBits32(int nbits, uint32_t value); + + // Ends the bit encoding and stores the result into the target_buffer. + void EndEncoding(EncoderBuffer *target_buffer); + + private: + void Clear(); + + std::vector symbols_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_BIT_CODERS_SYMBOL_BIT_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/config/compression_shared.h b/contrib/draco/src/draco/compression/config/compression_shared.h new file mode 100644 index 000000000..c43f303bd --- /dev/null +++ b/contrib/draco/src/draco/compression/config/compression_shared.h @@ -0,0 +1,155 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_CONFIG_COMPRESSION_SHARED_H_ +#define DRACO_COMPRESSION_CONFIG_COMPRESSION_SHARED_H_ + +#include + +#include "draco/core/macros.h" +#include "draco/draco_features.h" + +namespace draco { + +// Latest Draco bit-stream version. +static constexpr uint8_t kDracoPointCloudBitstreamVersionMajor = 2; +static constexpr uint8_t kDracoPointCloudBitstreamVersionMinor = 3; +static constexpr uint8_t kDracoMeshBitstreamVersionMajor = 2; +static constexpr uint8_t kDracoMeshBitstreamVersionMinor = 2; + +// Concatenated latest bit-stream version. +static constexpr uint16_t kDracoPointCloudBitstreamVersion = + DRACO_BITSTREAM_VERSION(kDracoPointCloudBitstreamVersionMajor, + kDracoPointCloudBitstreamVersionMinor); + +static constexpr uint16_t kDracoMeshBitstreamVersion = DRACO_BITSTREAM_VERSION( + kDracoMeshBitstreamVersionMajor, kDracoMeshBitstreamVersionMinor); + +// Currently, we support point cloud and triangular mesh encoding. +// TODO(draco-eng) Convert enum to enum class (safety, not performance). +enum EncodedGeometryType { + INVALID_GEOMETRY_TYPE = -1, + POINT_CLOUD = 0, + TRIANGULAR_MESH, + NUM_ENCODED_GEOMETRY_TYPES +}; + +// List of encoding methods for point clouds. +enum PointCloudEncodingMethod { + POINT_CLOUD_SEQUENTIAL_ENCODING = 0, + POINT_CLOUD_KD_TREE_ENCODING +}; + +// List of encoding methods for meshes. +enum MeshEncoderMethod { + MESH_SEQUENTIAL_ENCODING = 0, + MESH_EDGEBREAKER_ENCODING, +}; + +// List of various attribute encoders supported by our framework. The entries +// are used as unique identifiers of the encoders and their values should not +// be changed! +enum AttributeEncoderType { + BASIC_ATTRIBUTE_ENCODER = 0, + MESH_TRAVERSAL_ATTRIBUTE_ENCODER, + KD_TREE_ATTRIBUTE_ENCODER, +}; + +// List of various sequential attribute encoder/decoders that can be used in our +// pipeline. 
The values represent unique identifiers used by the decoder and +// they should not be changed. +enum SequentialAttributeEncoderType { + SEQUENTIAL_ATTRIBUTE_ENCODER_GENERIC = 0, + SEQUENTIAL_ATTRIBUTE_ENCODER_INTEGER, + SEQUENTIAL_ATTRIBUTE_ENCODER_QUANTIZATION, + SEQUENTIAL_ATTRIBUTE_ENCODER_NORMALS, +}; + +// List of all prediction methods currently supported by our framework. +enum PredictionSchemeMethod { + // Special value indicating that no prediction scheme was used. + PREDICTION_NONE = -2, + // Used when no specific prediction scheme is required. + PREDICTION_UNDEFINED = -1, + PREDICTION_DIFFERENCE = 0, + MESH_PREDICTION_PARALLELOGRAM = 1, + MESH_PREDICTION_MULTI_PARALLELOGRAM = 2, + MESH_PREDICTION_TEX_COORDS_DEPRECATED = 3, + MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM = 4, + MESH_PREDICTION_TEX_COORDS_PORTABLE = 5, + MESH_PREDICTION_GEOMETRIC_NORMAL = 6, + NUM_PREDICTION_SCHEMES +}; + +// List of all prediction scheme transforms used by our framework. +enum PredictionSchemeTransformType { + PREDICTION_TRANSFORM_NONE = -1, + // Basic delta transform where the prediction is computed as difference the + // predicted and original value. + PREDICTION_TRANSFORM_DELTA = 0, + // An improved delta transform where all computed delta values are wrapped + // around a fixed interval which lowers the entropy. + PREDICTION_TRANSFORM_WRAP = 1, + // Specialized transform for normal coordinates using inverted tiles. + PREDICTION_TRANSFORM_NORMAL_OCTAHEDRON = 2, + // Specialized transform for normal coordinates using canonicalized inverted + // tiles. + PREDICTION_TRANSFORM_NORMAL_OCTAHEDRON_CANONICALIZED = 3, + // The number of valid (non-negative) prediction scheme transform types. + NUM_PREDICTION_SCHEME_TRANSFORM_TYPES +}; + +// List of all mesh traversal methods supported by Draco framework. +enum MeshTraversalMethod { + MESH_TRAVERSAL_DEPTH_FIRST = 0, + MESH_TRAVERSAL_PREDICTION_DEGREE = 1, + NUM_TRAVERSAL_METHODS +}; + +// List of all variant of the edgebreaker method that is used for compression +// of mesh connectivity. +enum MeshEdgebreakerConnectivityEncodingMethod { + MESH_EDGEBREAKER_STANDARD_ENCODING = 0, + MESH_EDGEBREAKER_PREDICTIVE_ENCODING = 1, // Deprecated. + MESH_EDGEBREAKER_VALENCE_ENCODING = 2, +}; + +// Draco header V1 +struct DracoHeader { + int8_t draco_string[5]; + uint8_t version_major; + uint8_t version_minor; + uint8_t encoder_type; + uint8_t encoder_method; + uint16_t flags; +}; + +enum NormalPredictionMode { + ONE_TRIANGLE = 0, // To be deprecated. + TRIANGLE_AREA = 1, +}; + +// Different methods used for symbol entropy encoding. +enum SymbolCodingMethod { + SYMBOL_CODING_TAGGED = 0, + SYMBOL_CODING_RAW = 1, + NUM_SYMBOL_CODING_METHODS, +}; + +// Mask for setting and getting the bit for metadata in |flags| of header. +#define METADATA_FLAG_MASK 0x8000 + +} // namespace draco + +#endif // DRACO_COMPRESSION_CONFIG_COMPRESSION_SHARED_H_ diff --git a/contrib/draco/src/draco/compression/config/decoder_options.h b/contrib/draco/src/draco/compression/config/decoder_options.h new file mode 100644 index 000000000..3b3889993 --- /dev/null +++ b/contrib/draco/src/draco/compression/config/decoder_options.h @@ -0,0 +1,34 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_CONFIG_DECODER_OPTIONS_H_ +#define DRACO_COMPRESSION_CONFIG_DECODER_OPTIONS_H_ + +#include +#include + +#include "draco/attributes/geometry_attribute.h" +#include "draco/compression/config/draco_options.h" + +namespace draco { + +// Class containing options that can be passed to PointCloudDecoder to control +// decoding of the input geometry. The options can be specified either for the +// whole geometry or for a specific attribute type. Each option is identified +// by a unique name stored as an std::string. +typedef DracoOptions DecoderOptions; + +} // namespace draco + +#endif // DRACO_COMPRESSION_CONFIG_DECODER_OPTIONS_H_ diff --git a/contrib/draco/src/draco/compression/config/decoder_options_test.cc b/contrib/draco/src/draco/compression/config/decoder_options_test.cc new file mode 100644 index 000000000..a5cd7f106 --- /dev/null +++ b/contrib/draco/src/draco/compression/config/decoder_options_test.cc @@ -0,0 +1,67 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/config/decoder_options.h" + +#include "draco/core/draco_test_base.h" + +namespace { + +class DecoderOptionsTest : public ::testing::Test { + protected: + DecoderOptionsTest() {} +}; + +TEST_F(DecoderOptionsTest, TestOptions) { + // This test verifies that we can update global and attribute options of the + // DecoderOptions class instance. + draco::DecoderOptions options; + options.SetGlobalInt("test", 3); + ASSERT_EQ(options.GetGlobalInt("test", -1), 3); + + options.SetAttributeInt(draco::GeometryAttribute::POSITION, "test", 1); + options.SetAttributeInt(draco::GeometryAttribute::GENERIC, "test", 2); + ASSERT_EQ( + options.GetAttributeInt(draco::GeometryAttribute::TEX_COORD, "test", -1), + 3); + ASSERT_EQ( + options.GetAttributeInt(draco::GeometryAttribute::POSITION, "test", -1), + 1); + ASSERT_EQ( + options.GetAttributeInt(draco::GeometryAttribute::GENERIC, "test", -1), + 2); +} + +TEST_F(DecoderOptionsTest, TestAttributeOptionsAccessors) { + // This test verifies that we can query options stored in DecoderOptions + // class instance. 
+  draco::DecoderOptions options;
+  options.SetGlobalInt("test", 1);
+  options.SetAttributeInt(draco::GeometryAttribute::POSITION, "test", 2);
+  options.SetAttributeInt(draco::GeometryAttribute::TEX_COORD, "test", 3);
+
+  ASSERT_EQ(
+      options.GetAttributeInt(draco::GeometryAttribute::POSITION, "test", -1),
+      2);
+  ASSERT_EQ(
+      options.GetAttributeInt(draco::GeometryAttribute::POSITION, "test2", -1),
+      -1);
+  ASSERT_EQ(
+      options.GetAttributeInt(draco::GeometryAttribute::TEX_COORD, "test", -1),
+      3);
+  ASSERT_EQ(
+      options.GetAttributeInt(draco::GeometryAttribute::NORMAL, "test", -1), 1);
+}
+
+}  // namespace
diff --git a/contrib/draco/src/draco/compression/config/draco_options.h b/contrib/draco/src/draco/compression/config/draco_options.h
new file mode 100644
index 000000000..2bd4a3b67
--- /dev/null
+++ b/contrib/draco/src/draco/compression/config/draco_options.h
@@ -0,0 +1,249 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_CONFIG_DRACO_OPTIONS_H_
+#define DRACO_COMPRESSION_CONFIG_DRACO_OPTIONS_H_
+
+#include <map>
+#include <string>
+
+#include "draco/core/options.h"
+
+namespace draco {
+
+// Base option class used to control encoding and decoding. The geometry coding
+// can be controlled through the following options:
+//   1. Global options - Options specific to overall geometry or options common
+//      for all attributes
+//   2. Per attribute options - Options specific to a given attribute.
+//                              Each attribute is identified by the template
+//                              argument AttributeKeyT that can be for example
+//                              the attribute type or the attribute id.
+//
+// Example:
+//
+//   DracoOptions<AttributeKey> options;
+//
+//   // Set an option common for all attributes.
+//   options.SetGlobalInt("some_option_name", 2);
+//
+//   // Geometry with two attributes.
+//   AttributeKey att_key0 = in_key0;
+//   AttributeKey att_key1 = in_key1;
+//
+//   options.SetAttributeInt(att_key0, "some_option_name", 3);
+//
+//   options.GetAttributeInt(att_key0, "some_option_name");  // Returns 3
+//   options.GetAttributeInt(att_key1, "some_option_name");  // Returns 2
+//   options.GetGlobalInt("some_option_name");               // Returns 2
+//
+template <typename AttributeKeyT>
+class DracoOptions {
+ public:
+  typedef AttributeKeyT AttributeKey;
+
+  // Get an option for a specific attribute key. If the option is not found in
+  // an attribute specific storage, the implementation will return a global
+  // option of the given name (if available). If the option is not found, the
+  // provided default value |default_val| is returned instead.
+  int GetAttributeInt(const AttributeKey &att_key, const std::string &name,
+                      int default_val) const;
+
+  // Sets an option for a specific attribute key.
+  void SetAttributeInt(const AttributeKey &att_key, const std::string &name,
+                       int val);
+
+  float GetAttributeFloat(const AttributeKey &att_key, const std::string &name,
+                          float default_val) const;
+  void SetAttributeFloat(const AttributeKey &att_key, const std::string &name,
+                         float val);
+  bool GetAttributeBool(const AttributeKey &att_key, const std::string &name,
+                        bool default_val) const;
+  void SetAttributeBool(const AttributeKey &att_key, const std::string &name,
+                        bool val);
+  template <typename DataTypeT>
+  bool GetAttributeVector(const AttributeKey &att_key, const std::string &name,
+                          int num_dims, DataTypeT *val) const;
+  template <typename DataTypeT>
+  void SetAttributeVector(const AttributeKey &att_key, const std::string &name,
+                          int num_dims, const DataTypeT *val);
+
+  bool IsAttributeOptionSet(const AttributeKey &att_key,
+                            const std::string &name) const;
+
+  // Gets/sets a global option that is not specific to any attribute.
+  int GetGlobalInt(const std::string &name, int default_val) const {
+    return global_options_.GetInt(name, default_val);
+  }
+  void SetGlobalInt(const std::string &name, int val) {
+    global_options_.SetInt(name, val);
+  }
+  float GetGlobalFloat(const std::string &name, float default_val) const {
+    return global_options_.GetFloat(name, default_val);
+  }
+  void SetGlobalFloat(const std::string &name, float val) {
+    global_options_.SetFloat(name, val);
+  }
+  bool GetGlobalBool(const std::string &name, bool default_val) const {
+    return global_options_.GetBool(name, default_val);
+  }
+  void SetGlobalBool(const std::string &name, bool val) {
+    global_options_.SetBool(name, val);
+  }
+  template <typename DataTypeT>
+  bool GetGlobalVector(const std::string &name, int num_dims,
+                       DataTypeT *val) const {
+    return global_options_.GetVector(name, num_dims, val);
+  }
+  template <typename DataTypeT>
+  void SetGlobalVector(const std::string &name, int num_dims,
+                       const DataTypeT *val) {
+    global_options_.SetVector(name, val, num_dims);
+  }
+  bool IsGlobalOptionSet(const std::string &name) const {
+    return global_options_.IsOptionSet(name);
+  }
+
+  // Sets or replaces attribute options with the provided |options|.
+  void SetAttributeOptions(const AttributeKey &att_key, const Options &options);
+  void SetGlobalOptions(const Options &options) { global_options_ = options; }
+
+  // Returns |Options| instance for the specified options class if it exists.
+  const Options *FindAttributeOptions(const AttributeKeyT &att_key) const;
+  const Options &GetGlobalOptions() const { return global_options_; }
+
+ private:
+  Options *GetAttributeOptions(const AttributeKeyT &att_key);
+
+  Options global_options_;
+
+  // Storage for options related to geometry attributes.
+  std::map<AttributeKey, Options> attribute_options_;
+};
+
+template <typename AttributeKeyT>
+const Options *DracoOptions<AttributeKeyT>::FindAttributeOptions(
+    const AttributeKeyT &att_key) const {
+  auto it = attribute_options_.find(att_key);
+  if (it == attribute_options_.end()) {
+    return nullptr;
+  }
+  return &it->second;
+}
+
+template <typename AttributeKeyT>
+Options *DracoOptions<AttributeKeyT>::GetAttributeOptions(
+    const AttributeKeyT &att_key) {
+  auto it = attribute_options_.find(att_key);
+  if (it != attribute_options_.end()) {
+    return &it->second;
+  }
+  Options new_options;
+  it = attribute_options_.insert(std::make_pair(att_key, new_options)).first;
+  return &it->second;
+}
+
+template <typename AttributeKeyT>
+int DracoOptions<AttributeKeyT>::GetAttributeInt(const AttributeKeyT &att_key,
+                                                 const std::string &name,
+                                                 int default_val) const {
+  const Options *const att_options = FindAttributeOptions(att_key);
+  if (att_options && att_options->IsOptionSet(name)) {
+    return att_options->GetInt(name, default_val);
+  }
+  return global_options_.GetInt(name, default_val);
+}
+
+template <typename AttributeKeyT>
+void DracoOptions<AttributeKeyT>::SetAttributeInt(const AttributeKeyT &att_key,
+                                                  const std::string &name,
+                                                  int val) {
+  GetAttributeOptions(att_key)->SetInt(name, val);
+}
+
+template <typename AttributeKeyT>
+float DracoOptions<AttributeKeyT>::GetAttributeFloat(
+    const AttributeKeyT &att_key, const std::string &name,
+    float default_val) const {
+  const Options *const att_options = FindAttributeOptions(att_key);
+  if (att_options && att_options->IsOptionSet(name)) {
+    return att_options->GetFloat(name, default_val);
+  }
+  return global_options_.GetFloat(name, default_val);
+}
+
+template <typename AttributeKeyT>
+void DracoOptions<AttributeKeyT>::SetAttributeFloat(
+    const AttributeKeyT &att_key, const std::string &name, float val) {
+  GetAttributeOptions(att_key)->SetFloat(name, val);
+}
+
+template <typename AttributeKeyT>
+bool DracoOptions<AttributeKeyT>::GetAttributeBool(const AttributeKeyT &att_key,
+                                                   const std::string &name,
+                                                   bool default_val) const {
+  const Options *const att_options = FindAttributeOptions(att_key);
+  if (att_options && att_options->IsOptionSet(name)) {
+    return att_options->GetBool(name, default_val);
+  }
+  return global_options_.GetBool(name, default_val);
+}
+
+template <typename AttributeKeyT>
+void DracoOptions<AttributeKeyT>::SetAttributeBool(const AttributeKeyT &att_key,
+                                                   const std::string &name,
+                                                   bool val) {
+  GetAttributeOptions(att_key)->SetBool(name, val);
+}
+
+template <typename AttributeKeyT>
+template <typename DataTypeT>
+bool DracoOptions<AttributeKeyT>::GetAttributeVector(
+    const AttributeKey &att_key, const std::string &name, int num_dims,
+    DataTypeT *val) const {
+  const Options *const att_options = FindAttributeOptions(att_key);
+  if (att_options && att_options->IsOptionSet(name)) {
+    return att_options->GetVector(name, num_dims, val);
+  }
+  return global_options_.GetVector(name, num_dims, val);
+}
+
+template <typename AttributeKeyT>
+template <typename DataTypeT>
+void DracoOptions<AttributeKeyT>::SetAttributeVector(
+    const AttributeKey &att_key, const std::string &name, int num_dims,
+    const DataTypeT *val) {
+  GetAttributeOptions(att_key)->SetVector(name, val, num_dims);
+}
+
+template <typename AttributeKeyT>
+bool DracoOptions<AttributeKeyT>::IsAttributeOptionSet(
+    const AttributeKey &att_key, const std::string &name) const {
+  const Options *const att_options = FindAttributeOptions(att_key);
+  if (att_options) {
+    return att_options->IsOptionSet(name);
+  }
+  return global_options_.IsOptionSet(name);
+}
+
+template <typename AttributeKeyT>
+void DracoOptions<AttributeKeyT>::SetAttributeOptions(
+    const AttributeKey &att_key, const Options &options) {
+  Options *att_options = GetAttributeOptions(att_key);
+  *att_options = options;
+}
+
+}  // namespace draco
+
+#endif  // DRACO_COMPRESSION_CONFIG_DRACO_OPTIONS_H_
diff --git a/contrib/draco/src/draco/compression/config/encoder_options.h
b/contrib/draco/src/draco/compression/config/encoder_options.h new file mode 100644 index 000000000..ed1b02068 --- /dev/null +++ b/contrib/draco/src/draco/compression/config/encoder_options.h @@ -0,0 +1,97 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_CONFIG_ENCODER_OPTIONS_H_ +#define DRACO_COMPRESSION_CONFIG_ENCODER_OPTIONS_H_ + +#include "draco/attributes/geometry_attribute.h" +#include "draco/compression/config/draco_options.h" +#include "draco/compression/config/encoding_features.h" +#include "draco/draco_features.h" + +namespace draco { + +// EncoderOptions allow users to specify so called feature options that are used +// to inform the encoder which encoding features can be used (i.e. which +// features are going to be available to the decoder). +template +class EncoderOptionsBase : public DracoOptions { + public: + static EncoderOptionsBase CreateDefaultOptions() { + EncoderOptionsBase options; +#ifdef DRACO_STANDARD_EDGEBREAKER_SUPPORTED + options.SetSupportedFeature(features::kEdgebreaker, true); +#endif +#ifdef DRACO_PREDICTIVE_EDGEBREAKER_SUPPORTED + options.SetSupportedFeature(features::kPredictiveEdgebreaker, true); +#endif + return options; + } + static EncoderOptionsBase CreateEmptyOptions() { + return EncoderOptionsBase(); + } + + // Returns speed options with default value of 5. + int GetEncodingSpeed() const { + return this->GetGlobalInt("encoding_speed", 5); + } + int GetDecodingSpeed() const { + return this->GetGlobalInt("decoding_speed", 5); + } + + // Returns the maximum speed for both encoding/decoding. + int GetSpeed() const { + const int encoding_speed = this->GetGlobalInt("encoding_speed", -1); + const int decoding_speed = this->GetGlobalInt("decoding_speed", -1); + const int max_speed = std::max(encoding_speed, decoding_speed); + if (max_speed == -1) { + return 5; // Default value. + } + return max_speed; + } + + void SetSpeed(int encoding_speed, int decoding_speed) { + this->SetGlobalInt("encoding_speed", encoding_speed); + this->SetGlobalInt("decoding_speed", decoding_speed); + } + + // Sets a given feature as supported or unsupported by the target decoder. + // Encoder will always use only supported features when encoding the input + // geometry. + void SetSupportedFeature(const std::string &name, bool supported) { + feature_options_.SetBool(name, supported); + } + bool IsFeatureSupported(const std::string &name) const { + return feature_options_.GetBool(name); + } + + void SetFeatureOptions(const Options &options) { feature_options_ = options; } + const Options &GetFeaturelOptions() const { return feature_options_; } + + private: + // Use helper methods to construct the encoder options. + // See CreateDefaultOptions(); + EncoderOptionsBase() {} + + // List of supported/unsupported features that can be used by the encoder. + Options feature_options_; +}; + +// Encoder options where attributes are identified by their attribute id. 
+// Used to set options that are specific to a given geometry. +typedef EncoderOptionsBase EncoderOptions; + +} // namespace draco + +#endif // DRACO_COMPRESSION_CONFIG_ENCODER_OPTIONS_H_ diff --git a/contrib/draco/src/draco/compression/config/encoding_features.h b/contrib/draco/src/draco/compression/config/encoding_features.h new file mode 100644 index 000000000..d6a8b7128 --- /dev/null +++ b/contrib/draco/src/draco/compression/config/encoding_features.h @@ -0,0 +1,39 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// File provides helpful macros that define features available for encoding +// the input of the input geometry. These macros can be used as an input in +// the EncoderOptions::SetSupportedFeature() method instead of the text. +// The most recent set of features supported +// by the default implementation is: +// +// kEdgebreaker +// - edgebreaker method for encoding meshes. +// kPredictiveEdgebreaker +// - advanced version of the edgebreaker method (slower but better +// compression). +// +#ifndef DRACO_COMPRESSION_CONFIG_ENCODING_FEATURES_H_ +#define DRACO_COMPRESSION_CONFIG_ENCODING_FEATURES_H_ + +namespace draco { +namespace features { + +constexpr const char *kEdgebreaker = "standard_edgebreaker"; +constexpr const char *kPredictiveEdgebreaker = "predictive_edgebreaker"; + +} // namespace features +} // namespace draco + +#endif // DRACO_COMPRESSION_CONFIG_ENCODING_FEATURES_H_ diff --git a/contrib/draco/src/draco/compression/decode.cc b/contrib/draco/src/draco/compression/decode.cc new file mode 100644 index 000000000..92ae4ff66 --- /dev/null +++ b/contrib/draco/src/draco/compression/decode.cc @@ -0,0 +1,135 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +#include "draco/compression/decode.h" + +#include "draco/compression/config/compression_shared.h" + +#ifdef DRACO_MESH_COMPRESSION_SUPPORTED +#include "draco/compression/mesh/mesh_edgebreaker_decoder.h" +#include "draco/compression/mesh/mesh_sequential_decoder.h" +#endif + +#ifdef DRACO_POINT_CLOUD_COMPRESSION_SUPPORTED +#include "draco/compression/point_cloud/point_cloud_kd_tree_decoder.h" +#include "draco/compression/point_cloud/point_cloud_sequential_decoder.h" +#endif + +namespace draco { + +#ifdef DRACO_POINT_CLOUD_COMPRESSION_SUPPORTED +StatusOr> CreatePointCloudDecoder( + int8_t method) { + if (method == POINT_CLOUD_SEQUENTIAL_ENCODING) { + return std::unique_ptr( + new PointCloudSequentialDecoder()); + } else if (method == POINT_CLOUD_KD_TREE_ENCODING) { + return std::unique_ptr(new PointCloudKdTreeDecoder()); + } + return Status(Status::DRACO_ERROR, "Unsupported encoding method."); +} +#endif + +#ifdef DRACO_MESH_COMPRESSION_SUPPORTED +StatusOr> CreateMeshDecoder(uint8_t method) { + if (method == MESH_SEQUENTIAL_ENCODING) { + return std::unique_ptr(new MeshSequentialDecoder()); + } else if (method == MESH_EDGEBREAKER_ENCODING) { + return std::unique_ptr(new MeshEdgebreakerDecoder()); + } + return Status(Status::DRACO_ERROR, "Unsupported encoding method."); +} +#endif + +StatusOr Decoder::GetEncodedGeometryType( + DecoderBuffer *in_buffer) { + DecoderBuffer temp_buffer(*in_buffer); + DracoHeader header; + DRACO_RETURN_IF_ERROR(PointCloudDecoder::DecodeHeader(&temp_buffer, &header)); + if (header.encoder_type >= NUM_ENCODED_GEOMETRY_TYPES) { + return Status(Status::DRACO_ERROR, "Unsupported geometry type."); + } + return static_cast(header.encoder_type); +} + +StatusOr> Decoder::DecodePointCloudFromBuffer( + DecoderBuffer *in_buffer) { + DRACO_ASSIGN_OR_RETURN(EncodedGeometryType type, + GetEncodedGeometryType(in_buffer)) + if (type == POINT_CLOUD) { +#ifdef DRACO_POINT_CLOUD_COMPRESSION_SUPPORTED + std::unique_ptr point_cloud(new PointCloud()); + DRACO_RETURN_IF_ERROR(DecodeBufferToGeometry(in_buffer, point_cloud.get())) + return std::move(point_cloud); +#endif + } else if (type == TRIANGULAR_MESH) { +#ifdef DRACO_MESH_COMPRESSION_SUPPORTED + std::unique_ptr mesh(new Mesh()); + DRACO_RETURN_IF_ERROR(DecodeBufferToGeometry(in_buffer, mesh.get())) + return static_cast>(std::move(mesh)); +#endif + } + return Status(Status::DRACO_ERROR, "Unsupported geometry type."); +} + +StatusOr> Decoder::DecodeMeshFromBuffer( + DecoderBuffer *in_buffer) { + std::unique_ptr mesh(new Mesh()); + DRACO_RETURN_IF_ERROR(DecodeBufferToGeometry(in_buffer, mesh.get())) + return std::move(mesh); +} + +Status Decoder::DecodeBufferToGeometry(DecoderBuffer *in_buffer, + PointCloud *out_geometry) { +#ifdef DRACO_POINT_CLOUD_COMPRESSION_SUPPORTED + DecoderBuffer temp_buffer(*in_buffer); + DracoHeader header; + DRACO_RETURN_IF_ERROR(PointCloudDecoder::DecodeHeader(&temp_buffer, &header)) + if (header.encoder_type != POINT_CLOUD) { + return Status(Status::DRACO_ERROR, "Input is not a point cloud."); + } + DRACO_ASSIGN_OR_RETURN(std::unique_ptr decoder, + CreatePointCloudDecoder(header.encoder_method)) + + DRACO_RETURN_IF_ERROR(decoder->Decode(options_, in_buffer, out_geometry)) + return OkStatus(); +#else + return Status(Status::DRACO_ERROR, "Unsupported geometry type."); +#endif +} + +Status Decoder::DecodeBufferToGeometry(DecoderBuffer *in_buffer, + Mesh *out_geometry) { +#ifdef DRACO_MESH_COMPRESSION_SUPPORTED + DecoderBuffer temp_buffer(*in_buffer); + DracoHeader header; + 
DRACO_RETURN_IF_ERROR(PointCloudDecoder::DecodeHeader(&temp_buffer, &header)) + if (header.encoder_type != TRIANGULAR_MESH) { + return Status(Status::DRACO_ERROR, "Input is not a mesh."); + } + DRACO_ASSIGN_OR_RETURN(std::unique_ptr decoder, + CreateMeshDecoder(header.encoder_method)) + + DRACO_RETURN_IF_ERROR(decoder->Decode(options_, in_buffer, out_geometry)) + return OkStatus(); +#else + return Status(Status::DRACO_ERROR, "Unsupported geometry type."); +#endif +} + +void Decoder::SetSkipAttributeTransform(GeometryAttribute::Type att_type) { + options_.SetAttributeBool(att_type, "skip_attribute_transform", true); +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/decode.h b/contrib/draco/src/draco/compression/decode.h new file mode 100644 index 000000000..5f3fad26b --- /dev/null +++ b/contrib/draco/src/draco/compression/decode.h @@ -0,0 +1,80 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_DECODE_H_ +#define DRACO_COMPRESSION_DECODE_H_ + +#include "draco/compression/config/compression_shared.h" +#include "draco/compression/config/decoder_options.h" +#include "draco/core/decoder_buffer.h" +#include "draco/core/status_or.h" +#include "draco/draco_features.h" +#include "draco/mesh/mesh.h" + +namespace draco { + +// Class responsible for decoding of meshes and point clouds that were +// compressed by a Draco encoder. +class Decoder { + public: + // Returns the geometry type encoded in the input |in_buffer|. + // The return value is one of POINT_CLOUD, MESH or INVALID_GEOMETRY in case + // the input data is invalid. + // The decoded geometry type can be used to choose an appropriate decoding + // function for a given geometry type (see below). + static StatusOr GetEncodedGeometryType( + DecoderBuffer *in_buffer); + + // Decodes point cloud from the provided buffer. The buffer must be filled + // with data that was encoded with either the EncodePointCloudToBuffer or + // EncodeMeshToBuffer methods in encode.h. In case the input buffer contains + // mesh, the returned instance can be down-casted to Mesh. + StatusOr> DecodePointCloudFromBuffer( + DecoderBuffer *in_buffer); + + // Decodes a triangular mesh from the provided buffer. The mesh must be filled + // with data that was encoded using the EncodeMeshToBuffer method in encode.h. + // The function will return nullptr in case the input is invalid or if it was + // encoded with the EncodePointCloudToBuffer method. + StatusOr> DecodeMeshFromBuffer( + DecoderBuffer *in_buffer); + + // Decodes the buffer into a provided geometry. If the geometry is + // incompatible with the encoded data. For example, when |out_geometry| is + // draco::Mesh while the data contains a point cloud, the function will return + // an error status. 
+ Status DecodeBufferToGeometry(DecoderBuffer *in_buffer, + PointCloud *out_geometry); + Status DecodeBufferToGeometry(DecoderBuffer *in_buffer, Mesh *out_geometry); + + // When set, the decoder is going to skip attribute transform for a given + // attribute type. For example for quantized attributes, the decoder would + // skip the dequantization step and the returned geometry would contain an + // attribute with quantized values. The attribute would also contain an + // instance of AttributeTransform class that is used to describe the skipped + // transform, including all parameters that are needed to perform the + // transform manually. + void SetSkipAttributeTransform(GeometryAttribute::Type att_type); + + // Returns the options instance used by the decoder that can be used by users + // to control the decoding process. + DecoderOptions *options() { return &options_; } + + private: + DecoderOptions options_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_DECODE_H_ diff --git a/contrib/draco/src/draco/compression/decode_test.cc b/contrib/draco/src/draco/compression/decode_test.cc new file mode 100644 index 000000000..198714690 --- /dev/null +++ b/contrib/draco/src/draco/compression/decode_test.cc @@ -0,0 +1,169 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/decode.h" + +#include +#include + +#include "draco/core/draco_test_base.h" +#include "draco/core/draco_test_utils.h" +#include "draco/io/file_utils.h" + +namespace { + +class DecodeTest : public ::testing::Test { + protected: + DecodeTest() {} +}; + +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED +TEST_F(DecodeTest, TestSkipAttributeTransform) { + const std::string file_name = "test_nm_quant.0.9.0.drc"; + // Tests that decoders can successfully skip attribute transform. + std::vector data; + ASSERT_TRUE( + draco::ReadFileToBuffer(draco::GetTestFileFullPath(file_name), &data)); + ASSERT_FALSE(data.empty()); + + // Create a draco decoding buffer. Note that no data is copied in this step. + draco::DecoderBuffer buffer; + buffer.Init(data.data(), data.size()); + + draco::Decoder decoder; + // Make sure we skip dequantization for the position attribute. + decoder.SetSkipAttributeTransform(draco::GeometryAttribute::POSITION); + + // Decode the input data into a geometry. + std::unique_ptr pc = + decoder.DecodePointCloudFromBuffer(&buffer).value(); + ASSERT_NE(pc, nullptr); + + const draco::PointAttribute *const pos_att = + pc->GetNamedAttribute(draco::GeometryAttribute::POSITION); + ASSERT_NE(pos_att, nullptr); + + // Ensure the position attribute is of type int32_t and that it has a valid + // attribute transform. + ASSERT_EQ(pos_att->data_type(), draco::DT_INT32); + ASSERT_NE(pos_att->GetAttributeTransformData(), nullptr); + + // Normal attribute should be left transformed. 
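The Decoder API declared in decode.h above is normally driven by probing the payload type first and then calling the matching decode method. A minimal sketch of that flow follows; the helper name and the nullptr-on-failure convention are illustrative and not part of this patch.

#include <memory>
#include <vector>

#include "draco/compression/decode.h"

// Illustrative helper: |data| holds a Draco-encoded payload, e.g. the raw
// contents of a .drc file. Returns nullptr if the payload is not a mesh or
// fails to decode.
std::unique_ptr<draco::Mesh> DecodeMeshOrNull(const std::vector<char> &data) {
  draco::DecoderBuffer buffer;
  buffer.Init(data.data(), data.size());  // no copy of |data| is made

  // Probe the geometry type; DecodeMeshFromBuffer would fail on a point cloud.
  const auto type_or = draco::Decoder::GetEncodedGeometryType(&buffer);
  if (!type_or.ok() || type_or.value() != draco::TRIANGULAR_MESH) {
    return nullptr;
  }
  draco::Decoder decoder;
  auto mesh_or = decoder.DecodeMeshFromBuffer(&buffer);
  if (!mesh_or.ok()) {
    return nullptr;
  }
  return std::move(mesh_or).value();
}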
+ const draco::PointAttribute *const norm_att = + pc->GetNamedAttribute(draco::GeometryAttribute::NORMAL); + ASSERT_EQ(norm_att->data_type(), draco::DT_FLOAT32); + ASSERT_EQ(norm_att->GetAttributeTransformData(), nullptr); +} +#endif + +void TestSkipAttributeTransformOnPointCloudWithColor(const std::string &file) { + std::vector data; + ASSERT_TRUE(draco::ReadFileToBuffer(draco::GetTestFileFullPath(file), &data)); + ASSERT_FALSE(data.empty()); + + // Create a draco decoding buffer. Note that no data is copied in this step. + draco::DecoderBuffer buffer; + buffer.Init(data.data(), data.size()); + + draco::Decoder decoder; + // Make sure we skip dequantization for the position attribute. + decoder.SetSkipAttributeTransform(draco::GeometryAttribute::POSITION); + + // Decode the input data into a geometry. + std::unique_ptr pc = + decoder.DecodePointCloudFromBuffer(&buffer).value(); + ASSERT_NE(pc, nullptr); + + const draco::PointAttribute *const pos_att = + pc->GetNamedAttribute(draco::GeometryAttribute::POSITION); + ASSERT_NE(pos_att, nullptr); + + // Ensure the position attribute is of type int32_t or uint32_t and that it + // has a valid attribute transform. + ASSERT_TRUE(pos_att->data_type() == draco::DT_INT32 || + pos_att->data_type() == draco::DT_UINT32); + ASSERT_NE(pos_att->GetAttributeTransformData(), nullptr); + + const draco::PointAttribute *const clr_att = + pc->GetNamedAttribute(draco::GeometryAttribute::COLOR); + ASSERT_EQ(clr_att->data_type(), draco::DT_UINT8); + + // Ensure the color attribute was decoded correctly. Perform the decoding + // again without skipping the position dequantization and compare the + // attribute values. + + draco::DecoderBuffer buffer_2; + buffer_2.Init(data.data(), data.size()); + + draco::Decoder decoder_2; + + // Decode the input data into a geometry. + std::unique_ptr pc_2 = + decoder_2.DecodePointCloudFromBuffer(&buffer_2).value(); + ASSERT_NE(pc_2, nullptr); + + const draco::PointAttribute *const clr_att_2 = + pc_2->GetNamedAttribute(draco::GeometryAttribute::COLOR); + ASSERT_NE(clr_att_2, nullptr); + for (draco::PointIndex pi(0); pi < pc_2->num_points(); ++pi) { + // Colors should be exactly the same for both cases. + ASSERT_EQ(std::memcmp(clr_att->GetAddress(clr_att->mapped_index(pi)), + clr_att_2->GetAddress(clr_att_2->mapped_index(pi)), + clr_att->byte_stride()), + 0); + } +} + +TEST_F(DecodeTest, TestSkipAttributeTransformOnPointCloud) { + // Tests that decoders can successfully skip attribute transform on a point + // cloud with multiple attributes encoded with one attributes encoder. + TestSkipAttributeTransformOnPointCloudWithColor("pc_color.drc"); + TestSkipAttributeTransformOnPointCloudWithColor("pc_kd_color.drc"); +} + +TEST_F(DecodeTest, TestSkipAttributeTransformWithNoQuantization) { + // Tests that decoders can successfully skip attribute transform even though + // the input model was not quantized (it has no attribute transform). + const std::string file_name = "point_cloud_no_qp.drc"; + std::vector data; + ASSERT_TRUE( + draco::ReadFileToBuffer(draco::GetTestFileFullPath(file_name), &data)); + ASSERT_FALSE(data.empty()); + + // Create a draco decoding buffer. Note that no data is copied in this step. + draco::DecoderBuffer buffer; + buffer.Init(data.data(), data.size()); + + draco::Decoder decoder; + // Make sure we skip dequantization for the position attribute. + decoder.SetSkipAttributeTransform(draco::GeometryAttribute::POSITION); + + // Decode the input data into a geometry. 
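The tests around this point verify that a skipped transform leaves quantized integers plus an attribute transform record on the attribute. The sketch below shows how calling code might inspect such an attribute; the helper name is illustrative, and the dequantization formula in the comment is the usual min/range mapping that the quantization transform stores.

#include "draco/attributes/attribute_quantization_transform.h"
#include "draco/compression/decode.h"

// Illustrative helper: returns the quantization bit depth of a POSITION
// attribute whose transform was skipped, or -1 if the attribute was never
// quantized (in that case it still holds the original float values).
int SkippedPositionQuantizationBits(const draco::PointCloud &pc) {
  const draco::PointAttribute *const att =
      pc.GetNamedAttribute(draco::GeometryAttribute::POSITION);
  if (att == nullptr || att->GetAttributeTransformData() == nullptr) {
    return -1;
  }
  draco::AttributeQuantizationTransform transform;
  if (!transform.InitFromAttribute(*att)) {
    return -1;
  }
  // The stored integers q map back to floats as
  //   value = min + q * range / ((1 << bits) - 1)
  // with min and range kept inside the transform.
  return transform.quantization_bits();
}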
+ std::unique_ptr pc = + decoder.DecodePointCloudFromBuffer(&buffer).value(); + ASSERT_NE(pc, nullptr); + + const draco::PointAttribute *const pos_att = + pc->GetNamedAttribute(draco::GeometryAttribute::POSITION); + ASSERT_NE(pos_att, nullptr); + + // Ensure the position attribute is of type float32 since the attribute was + // not quantized. + ASSERT_EQ(pos_att->data_type(), draco::DT_FLOAT32); + + // Make sure there is no attribute transform available for the attribute. + ASSERT_EQ(pos_att->GetAttributeTransformData(), nullptr); +} + +} // namespace diff --git a/contrib/draco/src/draco/compression/encode.cc b/contrib/draco/src/draco/compression/encode.cc new file mode 100644 index 000000000..f380aec15 --- /dev/null +++ b/contrib/draco/src/draco/compression/encode.cc @@ -0,0 +1,96 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/encode.h" + +#include "draco/compression/expert_encode.h" + +namespace draco { + +Encoder::Encoder() {} + +Status Encoder::EncodePointCloudToBuffer(const PointCloud &pc, + EncoderBuffer *out_buffer) { + ExpertEncoder encoder(pc); + encoder.Reset(CreateExpertEncoderOptions(pc)); + return encoder.EncodeToBuffer(out_buffer); +} + +Status Encoder::EncodeMeshToBuffer(const Mesh &m, EncoderBuffer *out_buffer) { + ExpertEncoder encoder(m); + encoder.Reset(CreateExpertEncoderOptions(m)); + DRACO_RETURN_IF_ERROR(encoder.EncodeToBuffer(out_buffer)); + set_num_encoded_points(encoder.num_encoded_points()); + set_num_encoded_faces(encoder.num_encoded_faces()); + return OkStatus(); +} + +EncoderOptions Encoder::CreateExpertEncoderOptions(const PointCloud &pc) const { + EncoderOptions ret_options = EncoderOptions::CreateEmptyOptions(); + ret_options.SetGlobalOptions(options().GetGlobalOptions()); + ret_options.SetFeatureOptions(options().GetFeaturelOptions()); + // Convert type-based attribute options to specific attributes in the provided + // point cloud. 
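Encoder::EncodeMeshToBuffer above is the usual entry point. A minimal caller sketch with per-attribute-type quantization and a middle-of-the-road speed setting; the helper name and the bit counts are illustrative values only.

#include "draco/compression/encode.h"

// Illustrative helper: compress |mesh| into |out|.
draco::Status CompressMesh(const draco::Mesh &mesh, draco::EncoderBuffer *out) {
  draco::Encoder encoder;
  encoder.SetAttributeQuantization(draco::GeometryAttribute::POSITION, 14);
  encoder.SetAttributeQuantization(draco::GeometryAttribute::TEX_COORD, 12);
  encoder.SetAttributeQuantization(draco::GeometryAttribute::NORMAL, 10);
  encoder.SetSpeedOptions(/*encoding_speed=*/5, /*decoding_speed=*/5);
  return encoder.EncodeMeshToBuffer(mesh, out);
}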
+ for (int i = 0; i < pc.num_attributes(); ++i) { + const Options *att_options = + options().FindAttributeOptions(pc.attribute(i)->attribute_type()); + if (att_options) { + ret_options.SetAttributeOptions(i, *att_options); + } + } + return ret_options; +} + +void Encoder::Reset( + const EncoderOptionsBase &options) { + Base::Reset(options); +} + +void Encoder::Reset() { Base::Reset(); } + +void Encoder::SetSpeedOptions(int encoding_speed, int decoding_speed) { + Base::SetSpeedOptions(encoding_speed, decoding_speed); +} + +void Encoder::SetAttributeQuantization(GeometryAttribute::Type type, + int quantization_bits) { + options().SetAttributeInt(type, "quantization_bits", quantization_bits); +} + +void Encoder::SetAttributeExplicitQuantization(GeometryAttribute::Type type, + int quantization_bits, + int num_dims, + const float *origin, + float range) { + options().SetAttributeInt(type, "quantization_bits", quantization_bits); + options().SetAttributeVector(type, "quantization_origin", num_dims, origin); + options().SetAttributeFloat(type, "quantization_range", range); +} + +void Encoder::SetEncodingMethod(int encoding_method) { + Base::SetEncodingMethod(encoding_method); +} + +Status Encoder::SetAttributePredictionScheme(GeometryAttribute::Type type, + int prediction_scheme_method) { + Status status = CheckPredictionScheme(type, prediction_scheme_method); + if (!status.ok()) { + return status; + } + options().SetAttributeInt(type, "prediction_scheme", + prediction_scheme_method); + return status; +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/encode.h b/contrib/draco/src/draco/compression/encode.h new file mode 100644 index 000000000..bce8b34c2 --- /dev/null +++ b/contrib/draco/src/draco/compression/encode.h @@ -0,0 +1,140 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ENCODE_H_ +#define DRACO_COMPRESSION_ENCODE_H_ + +#include "draco/compression/config/compression_shared.h" +#include "draco/compression/config/encoder_options.h" +#include "draco/compression/encode_base.h" +#include "draco/core/encoder_buffer.h" +#include "draco/core/status.h" +#include "draco/mesh/mesh.h" + +namespace draco { + +// Basic helper class for encoding geometry using the Draco compression library. +// The class provides various methods that can be used to control several common +// options used during the encoding, such as the number of quantization bits for +// a given attribute. All these options are defined per attribute type, i.e., +// if there are more attributes of the same type (such as multiple texture +// coordinate attributes), the same options are going to be used for all of the +// attributes of this type. If different attributes of the same type need to +// use different options, use ExpertEncoder in expert_encode.h. +class Encoder + : public EncoderBase> { + public: + typedef EncoderBase> Base; + + Encoder(); + virtual ~Encoder() {} + + // Encodes a point cloud to the provided buffer. 
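As the class comment above notes, Encoder applies one setting per attribute type; when two attributes of the same type need different settings, ExpertEncoder addresses them by attribute index instead. A sketch under that assumption, with illustrative attribute indices and bit counts; the expert-encoder quantization test later in this patch exercises the same pattern.

#include "draco/compression/encode.h"
#include "draco/compression/expert_encode.h"

// Illustrative helper: give the two TEX_COORD attributes of |mesh| different
// quantization, which the plain Encoder cannot express.
draco::Status CompressWithPerAttributeBits(const draco::Mesh &mesh,
                                           draco::EncoderBuffer *out) {
  draco::ExpertEncoder encoder(mesh);
  encoder.SetAttributeQuantization(0, 16);  // position attribute
  encoder.SetAttributeQuantization(1, 15);  // first tex-coord attribute
  encoder.SetAttributeQuantization(2, 12);  // second tex-coord attribute
  return encoder.EncodeToBuffer(out);
}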
+ virtual Status EncodePointCloudToBuffer(const PointCloud &pc, + EncoderBuffer *out_buffer); + + // Encodes a mesh to the provided buffer. + virtual Status EncodeMeshToBuffer(const Mesh &m, EncoderBuffer *out_buffer); + + // Set encoder options used during the geometry encoding. Note that this call + // overwrites any modifications to the options done with the functions below, + // i.e., it resets the encoder. + void Reset(const EncoderOptionsBase &options); + void Reset(); + + // Sets the desired encoding and decoding speed for the given options. + // + // 0 = slowest speed, but the best compression. + // 10 = fastest, but the worst compression. + // -1 = undefined. + // + // Note that both speed options affect the encoder choice of used methods and + // algorithms. For example, a requirement for fast decoding may prevent the + // encoder from using the best compression methods even if the encoding speed + // is set to 0. In general, the faster of the two options limits the choice of + // features that can be used by the encoder. Additionally, setting + // |decoding_speed| to be faster than the |encoding_speed| may allow the + // encoder to choose the optimal method out of the available features for the + // given |decoding_speed|. + void SetSpeedOptions(int encoding_speed, int decoding_speed); + + // Sets the quantization compression options for a named attribute. The + // attribute values will be quantized in a box defined by the maximum extent + // of the attribute values. I.e., the actual precision of this option depends + // on the scale of the attribute values. + void SetAttributeQuantization(GeometryAttribute::Type type, + int quantization_bits); + + // Sets the explicit quantization compression for a named attribute. The + // attribute values will be quantized in a coordinate system defined by the + // provided origin and range (the input values should be within interval: + // ). + void SetAttributeExplicitQuantization(GeometryAttribute::Type type, + int quantization_bits, int num_dims, + const float *origin, float range); + + // Sets the desired prediction method for a given attribute. By default, + // prediction scheme is selected automatically by the encoder using other + // provided options (such as speed) and input geometry type (mesh, point + // cloud). This function should be called only when a specific prediction is + // preferred (e.g., when it is known that the encoder would select a less + // optimal prediction for the given input data). + // + // |prediction_scheme_method| should be one of the entries defined in + // compression/config/compression_shared.h : + // + // PREDICTION_NONE - use no prediction. + // PREDICTION_DIFFERENCE - delta coding + // MESH_PREDICTION_PARALLELOGRAM - parallelogram prediction for meshes. + // MESH_PREDICTION_CONSTRAINED_PARALLELOGRAM + // - better and more costly version of the parallelogram prediction. + // MESH_PREDICTION_TEX_COORDS_PORTABLE + // - specialized predictor for tex coordinates. + // MESH_PREDICTION_GEOMETRIC_NORMAL + // - specialized predictor for normal coordinates. + // + // Note that in case the desired prediction cannot be used, the default + // prediction will be automatically used instead. + Status SetAttributePredictionScheme(GeometryAttribute::Type type, + int prediction_scheme_method); + + // Sets the desired encoding method for a given geometry. 
By default, encoding + // method is selected based on the properties of the input geometry and based + // on the other options selected in the used EncoderOptions (such as desired + // encoding and decoding speed). This function should be called only when a + // specific method is required. + // + // |encoding_method| can be one of the values defined in + // compression/config/compression_shared.h based on the type of the input + // geometry that is going to be encoded. For point clouds, allowed entries are + // POINT_CLOUD_SEQUENTIAL_ENCODING + // POINT_CLOUD_KD_TREE_ENCODING + // + // For meshes the input can be + // MESH_SEQUENTIAL_ENCODING + // MESH_EDGEBREAKER_ENCODING + // + // If the selected method cannot be used for the given input, the subsequent + // call of EncodePointCloudToBuffer or EncodeMeshToBuffer is going to fail. + void SetEncodingMethod(int encoding_method); + + protected: + // Creates encoder options for the expert encoder used during the actual + // encoding. + EncoderOptions CreateExpertEncoderOptions(const PointCloud &pc) const; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ENCODE_H_ diff --git a/contrib/draco/src/draco/compression/encode_base.h b/contrib/draco/src/draco/compression/encode_base.h new file mode 100644 index 000000000..c501bc4fa --- /dev/null +++ b/contrib/draco/src/draco/compression/encode_base.h @@ -0,0 +1,131 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ENCODE_BASE_H_ +#define DRACO_COMPRESSION_ENCODE_BASE_H_ + +#include "draco/attributes/geometry_attribute.h" +#include "draco/compression/config/compression_shared.h" +#include "draco/core/status.h" + +namespace draco { + +// Base class for our geometry encoder classes. |EncoderOptionsT| specifies +// options class used by the encoder. Please, see encode.h and expert_encode.h +// for more details and method descriptions. +template +class EncoderBase { + public: + typedef EncoderOptionsT OptionsType; + + EncoderBase() + : options_(EncoderOptionsT::CreateDefaultOptions()), + num_encoded_points_(0), + num_encoded_faces_(0) {} + virtual ~EncoderBase() {} + + const EncoderOptionsT &options() const { return options_; } + EncoderOptionsT &options() { return options_; } + + // If enabled, it tells the encoder to keep track of the number of encoded + // points and faces (default = false). + // Note that this can slow down encoding for certain encoders. + void SetTrackEncodedProperties(bool flag); + + // Returns the number of encoded points and faces during the last encoding + // operation. Returns 0 if SetTrackEncodedProperties() was not set. 
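The speed, encoding-method and prediction-scheme knobs documented above compose as in the following sketch. The returned Status matters because an unsupported scheme/attribute pairing is rejected immediately; the concrete values are illustrative only.

#include "draco/compression/encode.h"

// Illustrative configuration: favour decoding speed, force edgebreaker
// connectivity coding and the portable tex-coord predictor.
draco::Status ConfigureEncoder(draco::Encoder *encoder) {
  encoder->SetSpeedOptions(/*encoding_speed=*/3, /*decoding_speed=*/7);
  encoder->SetEncodingMethod(draco::MESH_EDGEBREAKER_ENCODING);
  return encoder->SetAttributePredictionScheme(
      draco::GeometryAttribute::TEX_COORD,
      draco::MESH_PREDICTION_TEX_COORDS_PORTABLE);
}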
+ size_t num_encoded_points() const { return num_encoded_points_; } + size_t num_encoded_faces() const { return num_encoded_faces_; } + + protected: + void Reset(const EncoderOptionsT &options) { options_ = options; } + + void Reset() { options_ = EncoderOptionsT::CreateDefaultOptions(); } + + void SetSpeedOptions(int encoding_speed, int decoding_speed) { + options_.SetSpeed(encoding_speed, decoding_speed); + } + + void SetEncodingMethod(int encoding_method) { + options_.SetGlobalInt("encoding_method", encoding_method); + } + + void SetEncodingSubmethod(int encoding_submethod) { + options_.SetGlobalInt("encoding_submethod", encoding_submethod); + } + + Status CheckPredictionScheme(GeometryAttribute::Type att_type, + int prediction_scheme) const { + // Out of bound checks: + if (prediction_scheme < PREDICTION_NONE) { + return Status(Status::DRACO_ERROR, + "Invalid prediction scheme requested."); + } + if (prediction_scheme >= NUM_PREDICTION_SCHEMES) { + return Status(Status::DRACO_ERROR, + "Invalid prediction scheme requested."); + } + // Deprecated prediction schemes: + if (prediction_scheme == MESH_PREDICTION_TEX_COORDS_DEPRECATED) { + return Status(Status::DRACO_ERROR, + "MESH_PREDICTION_TEX_COORDS_DEPRECATED is deprecated."); + } + if (prediction_scheme == MESH_PREDICTION_MULTI_PARALLELOGRAM) { + return Status(Status::DRACO_ERROR, + "MESH_PREDICTION_MULTI_PARALLELOGRAM is deprecated."); + } + // Attribute specific checks: + if (prediction_scheme == MESH_PREDICTION_TEX_COORDS_PORTABLE) { + if (att_type != GeometryAttribute::TEX_COORD) { + return Status(Status::DRACO_ERROR, + "Invalid prediction scheme for attribute type."); + } + } + if (prediction_scheme == MESH_PREDICTION_GEOMETRIC_NORMAL) { + if (att_type != GeometryAttribute::NORMAL) { + return Status(Status::DRACO_ERROR, + "Invalid prediction scheme for attribute type."); + } + } + // TODO(hemmer): Try to enable more prediction schemes for normals. + if (att_type == GeometryAttribute::NORMAL) { + if (!(prediction_scheme == PREDICTION_DIFFERENCE || + prediction_scheme == MESH_PREDICTION_GEOMETRIC_NORMAL)) { + return Status(Status::DRACO_ERROR, + "Invalid prediction scheme for attribute type."); + } + } + return OkStatus(); + } + + protected: + void set_num_encoded_points(size_t num) { num_encoded_points_ = num; } + void set_num_encoded_faces(size_t num) { num_encoded_faces_ = num; } + + private: + EncoderOptionsT options_; + + size_t num_encoded_points_; + size_t num_encoded_faces_; +}; + +template +void EncoderBase::SetTrackEncodedProperties(bool flag) { + options_.SetGlobalBool("store_number_of_encoded_points", flag); + options_.SetGlobalBool("store_number_of_encoded_faces", flag); +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ENCODE_BASE_H_ diff --git a/contrib/draco/src/draco/compression/encode_test.cc b/contrib/draco/src/draco/compression/encode_test.cc new file mode 100644 index 000000000..fde4f6f5b --- /dev/null +++ b/contrib/draco/src/draco/compression/encode_test.cc @@ -0,0 +1,407 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include "draco/compression/encode.h" + +#include +#include +#include + +#include "draco/attributes/attribute_quantization_transform.h" +#include "draco/compression/config/compression_shared.h" +#include "draco/compression/decode.h" +#include "draco/compression/expert_encode.h" +#include "draco/core/draco_test_base.h" +#include "draco/core/draco_test_utils.h" +#include "draco/core/vector_d.h" +#include "draco/io/obj_decoder.h" +#include "draco/mesh/triangle_soup_mesh_builder.h" +#include "draco/point_cloud/point_cloud_builder.h" + +namespace { + +class EncodeTest : public ::testing::Test { + protected: + EncodeTest() {} + std::unique_ptr CreateTestMesh() const { + draco::TriangleSoupMeshBuilder mesh_builder; + + // Create a simple mesh with one face. + mesh_builder.Start(1); + + // Add one position attribute and two texture coordinate attributes. + const int32_t pos_att_id = mesh_builder.AddAttribute( + draco::GeometryAttribute::POSITION, 3, draco::DT_FLOAT32); + const int32_t tex_att_id_0 = mesh_builder.AddAttribute( + draco::GeometryAttribute::TEX_COORD, 2, draco::DT_FLOAT32); + const int32_t tex_att_id_1 = mesh_builder.AddAttribute( + draco::GeometryAttribute::TEX_COORD, 2, draco::DT_FLOAT32); + + // Initialize the attribute values. + mesh_builder.SetAttributeValuesForFace( + pos_att_id, draco::FaceIndex(0), draco::Vector3f(0.f, 0.f, 0.f).data(), + draco::Vector3f(1.f, 0.f, 0.f).data(), + draco::Vector3f(1.f, 1.f, 0.f).data()); + mesh_builder.SetAttributeValuesForFace( + tex_att_id_0, draco::FaceIndex(0), draco::Vector2f(0.f, 0.f).data(), + draco::Vector2f(1.f, 0.f).data(), draco::Vector2f(1.f, 1.f).data()); + mesh_builder.SetAttributeValuesForFace( + tex_att_id_1, draco::FaceIndex(0), draco::Vector2f(0.f, 0.f).data(), + draco::Vector2f(1.f, 0.f).data(), draco::Vector2f(1.f, 1.f).data()); + + return mesh_builder.Finalize(); + } + + std::unique_ptr CreateTestPointCloud() const { + draco::PointCloudBuilder pc_builder; + + constexpr int kNumPoints = 100; + constexpr int kNumGenAttCoords0 = 4; + constexpr int kNumGenAttCoords1 = 6; + pc_builder.Start(kNumPoints); + + // Add one position attribute and two generic attributes. + const int32_t pos_att_id = pc_builder.AddAttribute( + draco::GeometryAttribute::POSITION, 3, draco::DT_FLOAT32); + const int32_t gen_att_id_0 = pc_builder.AddAttribute( + draco::GeometryAttribute::GENERIC, kNumGenAttCoords0, draco::DT_UINT32); + const int32_t gen_att_id_1 = pc_builder.AddAttribute( + draco::GeometryAttribute::GENERIC, kNumGenAttCoords1, draco::DT_UINT8); + + std::vector gen_att_data_0(kNumGenAttCoords0); + std::vector gen_att_data_1(kNumGenAttCoords1); + + // Initialize the attribute values. 
+ for (draco::PointIndex i(0); i < kNumPoints; ++i) { + const float pos_coord = static_cast(i.value()); + pc_builder.SetAttributeValueForPoint( + pos_att_id, i, + draco::Vector3f(pos_coord, -pos_coord, pos_coord).data()); + + for (int j = 0; j < kNumGenAttCoords0; ++j) { + gen_att_data_0[j] = i.value(); + } + pc_builder.SetAttributeValueForPoint(gen_att_id_0, i, + gen_att_data_0.data()); + + for (int j = 0; j < kNumGenAttCoords1; ++j) { + gen_att_data_1[j] = -i.value(); + } + pc_builder.SetAttributeValueForPoint(gen_att_id_1, i, + gen_att_data_1.data()); + } + return pc_builder.Finalize(false); + } + + std::unique_ptr CreateTestPointCloudPosNorm() const { + draco::PointCloudBuilder pc_builder; + + constexpr int kNumPoints = 20; + pc_builder.Start(kNumPoints); + + // Add one position attribute and a normal attribute. + const int32_t pos_att_id = pc_builder.AddAttribute( + draco::GeometryAttribute::POSITION, 3, draco::DT_FLOAT32); + const int32_t norm_att_id = pc_builder.AddAttribute( + draco::GeometryAttribute::NORMAL, 3, draco::DT_FLOAT32); + + // Initialize the attribute values. + for (draco::PointIndex i(0); i < kNumPoints; ++i) { + const float pos_coord = static_cast(i.value()); + pc_builder.SetAttributeValueForPoint( + pos_att_id, i, + draco::Vector3f(pos_coord, -pos_coord, pos_coord).data()); + + // Pseudo-random normal. + draco::Vector3f norm(pos_coord * 2.f, pos_coord - 2.f, pos_coord * 3.f); + norm.Normalize(); + pc_builder.SetAttributeValueForPoint(norm_att_id, i, norm.data()); + } + + return pc_builder.Finalize(false); + } + + int GetQuantizationBitsFromAttribute(const draco::PointAttribute *att) const { + if (att == nullptr) { + return -1; + } + draco::AttributeQuantizationTransform transform; + if (!transform.InitFromAttribute(*att)) { + return -1; + } + return transform.quantization_bits(); + } + + void VerifyNumQuantizationBits(const draco::EncoderBuffer &buffer, + int pos_quantization, + int tex_coord_0_quantization, + int tex_coord_1_quantization) const { + draco::Decoder decoder; + + // Skip the dequantization for the attributes which will allow us to get + // the number of quantization bits used during encoding. + decoder.SetSkipAttributeTransform(draco::GeometryAttribute::POSITION); + decoder.SetSkipAttributeTransform(draco::GeometryAttribute::TEX_COORD); + + draco::DecoderBuffer in_buffer; + in_buffer.Init(buffer.data(), buffer.size()); + auto mesh = decoder.DecodeMeshFromBuffer(&in_buffer).value(); + ASSERT_NE(mesh, nullptr); + ASSERT_EQ(GetQuantizationBitsFromAttribute(mesh->attribute(0)), + pos_quantization); + ASSERT_EQ(GetQuantizationBitsFromAttribute(mesh->attribute(1)), + tex_coord_0_quantization); + ASSERT_EQ(GetQuantizationBitsFromAttribute(mesh->attribute(2)), + tex_coord_1_quantization); + } + + // Tests that the encoder returns the correct number of encoded points and + // faces for a given mesh or point cloud. 
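The helper test defined next exercises SetTrackEncodedProperties(); outside of tests, a caller reads the tracked counts back roughly as sketched below. The helper name is illustrative.

#include <cstddef>

#include "draco/compression/encode.h"

// Illustrative helper: encode |mesh| and report how many points and faces
// actually entered the bitstream (deduplication may change these relative to
// the input).
draco::Status EncodeAndReport(const draco::Mesh &mesh,
                              draco::EncoderBuffer *out, size_t *num_points,
                              size_t *num_faces) {
  draco::Encoder encoder;
  encoder.SetTrackEncodedProperties(true);
  const draco::Status status = encoder.EncodeMeshToBuffer(mesh, out);
  if (!status.ok()) {
    return status;
  }
  *num_points = encoder.num_encoded_points();
  *num_faces = encoder.num_encoded_faces();
  return draco::OkStatus();
}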
+ void TestNumberOfEncodedEntries(const std::string &file_name, + int32_t encoding_method) { + std::unique_ptr geometry; + draco::Mesh *mesh = nullptr; + + if (encoding_method == draco::MESH_EDGEBREAKER_ENCODING || + encoding_method == draco::MESH_SEQUENTIAL_ENCODING) { + std::unique_ptr mesh_tmp = + draco::ReadMeshFromTestFile(file_name); + mesh = mesh_tmp.get(); + if (!mesh->DeduplicateAttributeValues()) { + return; + } + mesh->DeduplicatePointIds(); + geometry = std::move(mesh_tmp); + } else { + geometry = draco::ReadPointCloudFromTestFile(file_name); + } + ASSERT_NE(mesh, nullptr); + + draco::Encoder encoder; + encoder.SetAttributeQuantization(draco::GeometryAttribute::POSITION, 14); + encoder.SetAttributeQuantization(draco::GeometryAttribute::TEX_COORD, 12); + encoder.SetAttributeQuantization(draco::GeometryAttribute::NORMAL, 10); + + encoder.SetEncodingMethod(encoding_method); + + encoder.SetTrackEncodedProperties(true); + + draco::EncoderBuffer buffer; + if (mesh) { + encoder.EncodeMeshToBuffer(*mesh, &buffer); + } else { + encoder.EncodePointCloudToBuffer(*geometry, &buffer); + } + + // Ensure the logged number of encoded points and faces matches the number + // we get from the decoder. + + draco::DecoderBuffer decoder_buffer; + decoder_buffer.Init(buffer.data(), buffer.size()); + draco::Decoder decoder; + + if (mesh) { + auto maybe_mesh = decoder.DecodeMeshFromBuffer(&decoder_buffer); + ASSERT_TRUE(maybe_mesh.ok()); + auto decoded_mesh = std::move(maybe_mesh).value(); + ASSERT_NE(decoded_mesh, nullptr); + ASSERT_EQ(decoded_mesh->num_points(), encoder.num_encoded_points()); + ASSERT_EQ(decoded_mesh->num_faces(), encoder.num_encoded_faces()); + } else { + auto maybe_pc = decoder.DecodePointCloudFromBuffer(&decoder_buffer); + ASSERT_TRUE(maybe_pc.ok()); + auto decoded_pc = std::move(maybe_pc).value(); + ASSERT_EQ(decoded_pc->num_points(), encoder.num_encoded_points()); + } + } +}; + +TEST_F(EncodeTest, TestExpertEncoderQuantization) { + // This test verifies that the expert encoder can quantize individual + // attributes even if they have the same type. + auto mesh = CreateTestMesh(); + ASSERT_NE(mesh, nullptr); + + draco::ExpertEncoder encoder(*mesh); + encoder.SetAttributeQuantization(0, 16); // Position quantization. + encoder.SetAttributeQuantization(1, 15); // Tex-coord 0 quantization. + encoder.SetAttributeQuantization(2, 14); // Tex-coord 1 quantization. + + draco::EncoderBuffer buffer; + encoder.EncodeToBuffer(&buffer); + VerifyNumQuantizationBits(buffer, 16, 15, 14); +} + +TEST_F(EncodeTest, TestEncoderQuantization) { + // This test verifies that Encoder applies the same quantization to all + // attributes of the same type. + auto mesh = CreateTestMesh(); + ASSERT_NE(mesh, nullptr); + + draco::Encoder encoder; + encoder.SetAttributeQuantization(draco::GeometryAttribute::POSITION, 16); + encoder.SetAttributeQuantization(draco::GeometryAttribute::TEX_COORD, 15); + + draco::EncoderBuffer buffer; + encoder.EncodeMeshToBuffer(*mesh, &buffer); + VerifyNumQuantizationBits(buffer, 16, 15, 15); +} + +TEST_F(EncodeTest, TestLinesObj) { + // This test verifies that Encoder can encode file that contains only line + // segments (that are ignored). 
+ std::unique_ptr mesh( + draco::ReadMeshFromTestFile("test_lines.obj")); + ASSERT_NE(mesh, nullptr); + ASSERT_EQ(mesh->num_faces(), 0); + std::unique_ptr pc( + draco::ReadPointCloudFromTestFile("test_lines.obj")); + ASSERT_NE(pc, nullptr); + + draco::Encoder encoder; + encoder.SetAttributeQuantization(draco::GeometryAttribute::POSITION, 16); + + draco::EncoderBuffer buffer; + ASSERT_TRUE(encoder.EncodePointCloudToBuffer(*pc, &buffer).ok()); +} + +TEST_F(EncodeTest, TestQuantizedInfinity) { + // This test verifies that Encoder fails to encode point cloud when requesting + // quantization of attribute that contains infinity values. + std::unique_ptr pc( + draco::ReadPointCloudFromTestFile("float_inf_point_cloud.ply")); + ASSERT_NE(pc, nullptr); + + { + draco::Encoder encoder; + encoder.SetEncodingMethod(draco::POINT_CLOUD_SEQUENTIAL_ENCODING); + encoder.SetAttributeQuantization(draco::GeometryAttribute::POSITION, 11); + + draco::EncoderBuffer buffer; + ASSERT_FALSE(encoder.EncodePointCloudToBuffer(*pc, &buffer).ok()); + } + + { + draco::Encoder encoder; + encoder.SetEncodingMethod(draco::POINT_CLOUD_KD_TREE_ENCODING); + encoder.SetAttributeQuantization(draco::GeometryAttribute::POSITION, 11); + + draco::EncoderBuffer buffer; + ASSERT_FALSE(encoder.EncodePointCloudToBuffer(*pc, &buffer).ok()); + } +} + +TEST_F(EncodeTest, TestUnquantizedInfinity) { + // This test verifies that Encoder can successfully encode point cloud when + // not requesting quantization of attribute that contains infinity values. + std::unique_ptr pc( + draco::ReadPointCloudFromTestFile("float_inf_point_cloud.ply")); + ASSERT_NE(pc, nullptr); + + // Note that the KD tree encoding method is not applicable to float values. + draco::Encoder encoder; + encoder.SetEncodingMethod(draco::POINT_CLOUD_SEQUENTIAL_ENCODING); + + draco::EncoderBuffer buffer; + ASSERT_TRUE(encoder.EncodePointCloudToBuffer(*pc, &buffer).ok()); +} + +TEST_F(EncodeTest, TestQuantizedAndUnquantizedAttributes) { + // This test verifies that Encoder can successfully encode point cloud with + // two float attribiutes - one quantized and another unquantized. The encoder + // defaults to sequential encoding in this case. + std::unique_ptr pc( + draco::ReadPointCloudFromTestFile("float_two_att_point_cloud.ply")); + ASSERT_NE(pc, nullptr); + + draco::Encoder encoder; + encoder.SetAttributeQuantization(draco::GeometryAttribute::POSITION, 11); + encoder.SetAttributeQuantization(draco::GeometryAttribute::NORMAL, 0); + draco::EncoderBuffer buffer; + ASSERT_TRUE(encoder.EncodePointCloudToBuffer(*pc, &buffer).ok()); +} + +TEST_F(EncodeTest, TestKdTreeEncoding) { + // This test verifies that the API can successfully encode a point cloud + // defined by several attributes using the kd tree method. + std::unique_ptr pc = CreateTestPointCloud(); + ASSERT_NE(pc, nullptr); + + draco::EncoderBuffer buffer; + draco::Encoder encoder; + encoder.SetEncodingMethod(draco::POINT_CLOUD_KD_TREE_ENCODING); + // First try it without quantizing positions which should fail. + ASSERT_FALSE(encoder.EncodePointCloudToBuffer(*pc, &buffer).ok()); + + // Now set quantization for the position attribute which should make + // the encoder happy. 
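The quantization used in the tests above derives its box from each model's own extent; SetAttributeExplicitQuantization instead pins the grid to a caller-chosen origin and range, one reason being that separately encoded models of the same scene then quantize positions identically. A sketch follows; the origin and range values are illustrative and must enclose all input positions.

#include "draco/compression/encode.h"

// Illustrative configuration: quantize positions on a fixed grid shared by
// every model of a scene instead of a per-model bounding box.
void UseFixedPositionGrid(draco::Encoder *encoder) {
  const float origin[3] = {-10.f, -10.f, -10.f};
  encoder->SetAttributeExplicitQuantization(
      draco::GeometryAttribute::POSITION, /*quantization_bits=*/14,
      /*num_dims=*/3, origin, /*range=*/20.f);
}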
+ encoder.SetAttributeQuantization(draco::GeometryAttribute::POSITION, 16); + ASSERT_TRUE(encoder.EncodePointCloudToBuffer(*pc, &buffer).ok()); +} + +TEST_F(EncodeTest, TestTrackingOfNumberOfEncodedEntries) { + TestNumberOfEncodedEntries("deg_faces.obj", draco::MESH_EDGEBREAKER_ENCODING); + TestNumberOfEncodedEntries("deg_faces.obj", draco::MESH_SEQUENTIAL_ENCODING); + TestNumberOfEncodedEntries("cube_att.obj", draco::MESH_EDGEBREAKER_ENCODING); + TestNumberOfEncodedEntries("test_nm.obj", draco::MESH_EDGEBREAKER_ENCODING); + TestNumberOfEncodedEntries("test_nm.obj", draco::MESH_SEQUENTIAL_ENCODING); + TestNumberOfEncodedEntries("cube_subd.obj", + draco::POINT_CLOUD_KD_TREE_ENCODING); + TestNumberOfEncodedEntries("cube_subd.obj", + draco::POINT_CLOUD_SEQUENTIAL_ENCODING); +} + +TEST_F(EncodeTest, TestTrackingOfNumberOfEncodedEntriesNotSet) { + // Tests that when tracing of encoded properties is disabled, the returned + // number of encoded faces and points is 0. + std::unique_ptr mesh( + draco::ReadMeshFromTestFile("cube_att.obj")); + ASSERT_NE(mesh, nullptr); + + draco::EncoderBuffer buffer; + draco::Encoder encoder; + + ASSERT_TRUE(encoder.EncodeMeshToBuffer(*mesh, &buffer).ok()); + ASSERT_EQ(encoder.num_encoded_points(), 0); + ASSERT_EQ(encoder.num_encoded_faces(), 0); +} + +TEST_F(EncodeTest, TestNoPosQuantizationNormalCoding) { + // Tests that we can encode and decode a file with quantized normals but + // non-quantized positions. + const auto mesh = draco::ReadMeshFromTestFile("test_nm.obj"); + ASSERT_NE(mesh, nullptr); + + // The mesh should have positions and normals. + ASSERT_NE(mesh->GetNamedAttribute(draco::GeometryAttribute::POSITION), + nullptr); + ASSERT_NE(mesh->GetNamedAttribute(draco::GeometryAttribute::NORMAL), nullptr); + + draco::EncoderBuffer buffer; + draco::Encoder encoder; + // No quantization for positions. + encoder.SetAttributeQuantization(draco::GeometryAttribute::NORMAL, 8); + + DRACO_ASSERT_OK(encoder.EncodeMeshToBuffer(*mesh, &buffer)); + + draco::Decoder decoder; + + draco::DecoderBuffer in_buffer; + in_buffer.Init(buffer.data(), buffer.size()); + const auto decoded_mesh = decoder.DecodeMeshFromBuffer(&in_buffer).value(); + ASSERT_NE(decoded_mesh, nullptr); +} + +} // namespace diff --git a/contrib/draco/src/draco/compression/entropy/ans.h b/contrib/draco/src/draco/compression/entropy/ans.h new file mode 100644 index 000000000..c71d58975 --- /dev/null +++ b/contrib/draco/src/draco/compression/entropy/ans.h @@ -0,0 +1,527 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ENTROPY_ANS_H_ +#define DRACO_COMPRESSION_ENTROPY_ANS_H_ +// An implementation of Asymmetric Numeral Systems (rANS). +// See http://arxiv.org/abs/1311.2540v2 for more information on rANS. +// This file is based off libvpx's ans.h. 
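Before the coders themselves, the bare state recurrence from the paper may be easier to see in isolation. The toy below is self-contained, uses no Draco type, and leaves out the renormalization to a byte stream that the real coders in this file add; it only demonstrates that the encode step x' = floor(x / f_s) * M + c_s + (x mod f_s) is exactly inverted by the decode step.

#include <cassert>
#include <cstdint>

int main() {
  // Alphabet {0, 1, 2} with frequencies f summing to M = 4 (so f/M are the
  // symbol probabilities) and cumulative frequencies c.
  const uint64_t f[3] = {2, 1, 1};
  const uint64_t c[3] = {0, 2, 3};
  const uint64_t M = 4;
  const int syms[5] = {0, 2, 0, 1, 0};

  // Encode. ANS is last-in-first-out, so push the symbols in reverse.
  uint64_t x = M;  // arbitrary starting state, mirrors DRACO_ANS_L_BASE's role
  for (int i = 4; i >= 0; --i) {
    const int s = syms[i];
    x = (x / f[s]) * M + c[s] + (x % f[s]);
  }

  // Decode in forward order: the slot x mod M identifies the symbol, and the
  // state update undoes the corresponding encode step.
  for (int i = 0; i < 5; ++i) {
    const uint64_t slot = x % M;
    int s = 0;
    while (slot >= c[s] + f[s]) ++s;  // find the symbol owning this slot
    assert(s == syms[i]);
    x = f[s] * (x / M) + slot - c[s];
  }
  assert(x == M);  // the state unwinds back to its initial value
  return 0;
}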
+ +#include + +#define DRACO_ANS_DIVIDE_BY_MULTIPLY 1 +#if DRACO_ANS_DIVIDE_BY_MULTIPLY +#include "draco/core/divide.h" +#endif +#include "draco/core/macros.h" + +namespace draco { + +#if DRACO_ANS_DIVIDE_BY_MULTIPLY + +#define DRACO_ANS_DIVREM(quotient, remainder, dividend, divisor) \ + do { \ + quotient = fastdiv(dividend, divisor); \ + remainder = dividend - quotient * divisor; \ + } while (0) +#define DRACO_ANS_DIV(dividend, divisor) fastdiv(dividend, divisor) +#else +#define DRACO_ANS_DIVREM(quotient, remainder, dividend, divisor) \ + do { \ + quotient = dividend / divisor; \ + remainder = dividend % divisor; \ + } while (0) +#define DRACO_ANS_DIV(dividend, divisor) ((dividend) / (divisor)) +#endif + +struct AnsCoder { + AnsCoder() : buf(nullptr), buf_offset(0), state(0) {} + uint8_t *buf; + int buf_offset; + uint32_t state; +}; + +struct AnsDecoder { + AnsDecoder() : buf(nullptr), buf_offset(0), state(0) {} + const uint8_t *buf; + int buf_offset; + uint32_t state; +}; + +typedef uint8_t AnsP8; +#define DRACO_ANS_P8_PRECISION 256u +#define DRACO_ANS_L_BASE (4096u) +#define DRACO_ANS_IO_BASE 256 + +static uint32_t mem_get_le16(const void *vmem) { + uint32_t val; + const uint8_t *mem = (const uint8_t *)vmem; + + val = mem[1] << 8; + val |= mem[0]; + return val; +} + +static uint32_t mem_get_le24(const void *vmem) { + uint32_t val; + const uint8_t *mem = (const uint8_t *)vmem; + + val = mem[2] << 16; + val |= mem[1] << 8; + val |= mem[0]; + return val; +} + +static inline uint32_t mem_get_le32(const void *vmem) { + uint32_t val; + const uint8_t *mem = (const uint8_t *)vmem; + + val = mem[3] << 24; + val |= mem[2] << 16; + val |= mem[1] << 8; + val |= mem[0]; + return val; +} + +static inline void mem_put_le16(void *vmem, uint32_t val) { + uint8_t *mem = reinterpret_cast(vmem); + + mem[0] = (val >> 0) & 0xff; + mem[1] = (val >> 8) & 0xff; +} + +static inline void mem_put_le24(void *vmem, uint32_t val) { + uint8_t *mem = reinterpret_cast(vmem); + + mem[0] = (val >> 0) & 0xff; + mem[1] = (val >> 8) & 0xff; + mem[2] = (val >> 16) & 0xff; +} + +static inline void mem_put_le32(void *vmem, uint32_t val) { + uint8_t *mem = reinterpret_cast(vmem); + + mem[0] = (val >> 0) & 0xff; + mem[1] = (val >> 8) & 0xff; + mem[2] = (val >> 16) & 0xff; + mem[3] = (val >> 24) & 0xff; +} + +static inline void ans_write_init(struct AnsCoder *const ans, + uint8_t *const buf) { + ans->buf = buf; + ans->buf_offset = 0; + ans->state = DRACO_ANS_L_BASE; +} + +static inline int ans_write_end(struct AnsCoder *const ans) { + uint32_t state; + DRACO_DCHECK_GE(ans->state, DRACO_ANS_L_BASE); + DRACO_DCHECK_LT(ans->state, DRACO_ANS_L_BASE * DRACO_ANS_IO_BASE); + state = ans->state - DRACO_ANS_L_BASE; + if (state < (1 << 6)) { + ans->buf[ans->buf_offset] = (0x00 << 6) + state; + return ans->buf_offset + 1; + } else if (state < (1 << 14)) { + mem_put_le16(ans->buf + ans->buf_offset, (0x01 << 14) + state); + return ans->buf_offset + 2; + } else if (state < (1 << 22)) { + mem_put_le24(ans->buf + ans->buf_offset, (0x02 << 22) + state); + return ans->buf_offset + 3; + } else { + DRACO_DCHECK(0 && "State is too large to be serialized"); + return ans->buf_offset; + } +} + +// rABS with descending spread. +// p or p0 takes the place of l_s from the paper. +// DRACO_ANS_P8_PRECISION is m. +static inline void rabs_desc_write(struct AnsCoder *ans, int val, AnsP8 p0) { + const AnsP8 p = DRACO_ANS_P8_PRECISION - p0; + const unsigned l_s = val ? 
p : p0; + unsigned quot, rem; + if (ans->state >= + DRACO_ANS_L_BASE / DRACO_ANS_P8_PRECISION * DRACO_ANS_IO_BASE * l_s) { + ans->buf[ans->buf_offset++] = ans->state % DRACO_ANS_IO_BASE; + ans->state /= DRACO_ANS_IO_BASE; + } + DRACO_ANS_DIVREM(quot, rem, ans->state, l_s); + ans->state = quot * DRACO_ANS_P8_PRECISION + rem + (val ? 0 : p); +} + +#define DRACO_ANS_IMPL1 0 +#define UNPREDICTABLE(x) x +static inline int rabs_desc_read(struct AnsDecoder *ans, AnsP8 p0) { + int val; +#if DRACO_ANS_IMPL1 + unsigned l_s; +#else + unsigned quot, rem, x, xn; +#endif + const AnsP8 p = DRACO_ANS_P8_PRECISION - p0; + if (ans->state < DRACO_ANS_L_BASE && ans->buf_offset > 0) { + ans->state = ans->state * DRACO_ANS_IO_BASE + ans->buf[--ans->buf_offset]; + } +#if DRACO_ANS_IMPL1 + val = ans->state % DRACO_ANS_P8_PRECISION < p; + l_s = val ? p : p0; + ans->state = (ans->state / DRACO_ANS_P8_PRECISION) * l_s + + ans->state % DRACO_ANS_P8_PRECISION - (!val * p); +#else + x = ans->state; + quot = x / DRACO_ANS_P8_PRECISION; + rem = x % DRACO_ANS_P8_PRECISION; + xn = quot * p; + val = rem < p; + if (UNPREDICTABLE(val)) { + ans->state = xn + rem; + } else { + // ans->state = quot * p0 + rem - p; + ans->state = x - xn - p; + } +#endif + return val; +} + +// rABS with ascending spread. +// p or p0 takes the place of l_s from the paper. +// DRACO_ANS_P8_PRECISION is m. +static inline void rabs_asc_write(struct AnsCoder *ans, int val, AnsP8 p0) { + const AnsP8 p = DRACO_ANS_P8_PRECISION - p0; + const unsigned l_s = val ? p : p0; + unsigned quot, rem; + if (ans->state >= + DRACO_ANS_L_BASE / DRACO_ANS_P8_PRECISION * DRACO_ANS_IO_BASE * l_s) { + ans->buf[ans->buf_offset++] = ans->state % DRACO_ANS_IO_BASE; + ans->state /= DRACO_ANS_IO_BASE; + } + DRACO_ANS_DIVREM(quot, rem, ans->state, l_s); + ans->state = quot * DRACO_ANS_P8_PRECISION + rem + (val ? p0 : 0); +} + +static inline int rabs_asc_read(struct AnsDecoder *ans, AnsP8 p0) { + int val; +#if DRACO_ANS_IMPL1 + unsigned l_s; +#else + unsigned quot, rem, x, xn; +#endif + const AnsP8 p = DRACO_ANS_P8_PRECISION - p0; + if (ans->state < DRACO_ANS_L_BASE) { + ans->state = ans->state * DRACO_ANS_IO_BASE + ans->buf[--ans->buf_offset]; + } +#if DRACO_ANS_IMPL1 + val = ans->state % DRACO_ANS_P8_PRECISION < p; + l_s = val ? p : p0; + ans->state = (ans->state / DRACO_ANS_P8_PRECISION) * l_s + + ans->state % DRACO_ANS_P8_PRECISION - (!val * p); +#else + x = ans->state; + quot = x / DRACO_ANS_P8_PRECISION; + rem = x % DRACO_ANS_P8_PRECISION; + xn = quot * p; + val = rem >= p0; + if (UNPREDICTABLE(val)) { + ans->state = xn + rem - p0; + } else { + // ans->state = quot * p0 + rem - p0; + ans->state = x - xn; + } +#endif + return val; +} + +#define rabs_read rabs_desc_read +#define rabs_write rabs_desc_write + +// uABS with normalization. +static inline void uabs_write(struct AnsCoder *ans, int val, AnsP8 p0) { + AnsP8 p = DRACO_ANS_P8_PRECISION - p0; + const unsigned l_s = val ? 
p : p0; + while (ans->state >= + DRACO_ANS_L_BASE / DRACO_ANS_P8_PRECISION * DRACO_ANS_IO_BASE * l_s) { + ans->buf[ans->buf_offset++] = ans->state % DRACO_ANS_IO_BASE; + ans->state /= DRACO_ANS_IO_BASE; + } + if (!val) { + ans->state = DRACO_ANS_DIV(ans->state * DRACO_ANS_P8_PRECISION, p0); + } else { + ans->state = + DRACO_ANS_DIV((ans->state + 1) * DRACO_ANS_P8_PRECISION + p - 1, p) - 1; + } +} + +static inline int uabs_read(struct AnsDecoder *ans, AnsP8 p0) { + AnsP8 p = DRACO_ANS_P8_PRECISION - p0; + int s; + // unsigned int xp1; + unsigned xp, sp; + unsigned state = ans->state; + while (state < DRACO_ANS_L_BASE && ans->buf_offset > 0) { + state = state * DRACO_ANS_IO_BASE + ans->buf[--ans->buf_offset]; + } + sp = state * p; + // xp1 = (sp + p) / DRACO_ANS_P8_PRECISION; + xp = sp / DRACO_ANS_P8_PRECISION; + // s = xp1 - xp; + s = (sp & 0xFF) >= p0; + if (UNPREDICTABLE(s)) { + ans->state = xp; + } else { + ans->state = state - xp; + } + return s; +} + +static inline int uabs_read_bit(struct AnsDecoder *ans) { + int s; + unsigned state = ans->state; + while (state < DRACO_ANS_L_BASE && ans->buf_offset > 0) { + state = state * DRACO_ANS_IO_BASE + ans->buf[--ans->buf_offset]; + } + s = static_cast(state & 1); + ans->state = state >> 1; + return s; +} + +static inline int ans_read_init(struct AnsDecoder *const ans, + const uint8_t *const buf, int offset) { + unsigned x; + if (offset < 1) { + return 1; + } + ans->buf = buf; + x = buf[offset - 1] >> 6; + if (x == 0) { + ans->buf_offset = offset - 1; + ans->state = buf[offset - 1] & 0x3F; + } else if (x == 1) { + if (offset < 2) { + return 1; + } + ans->buf_offset = offset - 2; + ans->state = mem_get_le16(buf + offset - 2) & 0x3FFF; + } else if (x == 2) { + if (offset < 3) { + return 1; + } + ans->buf_offset = offset - 3; + ans->state = mem_get_le24(buf + offset - 3) & 0x3FFFFF; + } else { + return 1; + } + ans->state += DRACO_ANS_L_BASE; + if (ans->state >= DRACO_ANS_L_BASE * DRACO_ANS_IO_BASE) { + return 1; + } + return 0; +} + +static inline int ans_read_end(struct AnsDecoder *const ans) { + return ans->state == DRACO_ANS_L_BASE; +} + +static inline int ans_reader_has_error(const struct AnsDecoder *const ans) { + return ans->state < DRACO_ANS_L_BASE && ans->buf_offset == 0; +} + +struct rans_sym { + uint32_t prob; + uint32_t cum_prob; // not-inclusive. +}; + +// Class for performing rANS encoding using a desired number of precision bits. +// The max number of precision bits is currently 19. The actual number of +// symbols in the input alphabet should be (much) smaller than that, otherwise +// the compression rate may suffer. +template +class RAnsEncoder { + public: + RAnsEncoder() {} + + // Provides the input buffer where the data is going to be stored. + inline void write_init(uint8_t *const buf) { + ans_.buf = buf; + ans_.buf_offset = 0; + ans_.state = l_rans_base; + } + + // Needs to be called after all symbols are encoded. 
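The binary (uABS) routines above already form a complete coder on their own. Below is a self-contained round trip, assuming it is compiled against this header; p0 is the probability of the bit being 0, scaled to 256, and bits are written in reverse because ANS reads them back last-in-first-out.

#include <cassert>
#include <cstdint>

#include "draco/compression/entropy/ans.h"

int main() {
  const int bits[5] = {1, 0, 0, 1, 1};
  const draco::AnsP8 zero_prob = 192;  // P(bit == 0) of roughly 192/256

  uint8_t buf[16];
  draco::AnsCoder coder;
  draco::ans_write_init(&coder, buf);
  for (int i = 4; i >= 0; --i) {  // reverse order, see note above
    draco::uabs_write(&coder, bits[i], zero_prob);
  }
  const int num_bytes = draco::ans_write_end(&coder);

  draco::AnsDecoder decoder;
  if (draco::ans_read_init(&decoder, buf, num_bytes) != 0) {
    return 1;  // malformed stream
  }
  for (int i = 0; i < 5; ++i) {
    const int bit = draco::uabs_read(&decoder, zero_prob);
    assert(bit == bits[i]);
    (void)bit;
  }
  assert(draco::ans_read_end(&decoder));  // all encoder state consumed
  return 0;
}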
+ inline int write_end() { + uint32_t state; + DRACO_DCHECK_GE(ans_.state, l_rans_base); + DRACO_DCHECK_LT(ans_.state, l_rans_base * DRACO_ANS_IO_BASE); + state = ans_.state - l_rans_base; + if (state < (1 << 6)) { + ans_.buf[ans_.buf_offset] = (0x00 << 6) + state; + return ans_.buf_offset + 1; + } else if (state < (1 << 14)) { + mem_put_le16(ans_.buf + ans_.buf_offset, (0x01 << 14) + state); + return ans_.buf_offset + 2; + } else if (state < (1 << 22)) { + mem_put_le24(ans_.buf + ans_.buf_offset, (0x02 << 22) + state); + return ans_.buf_offset + 3; + } else if (state < (1 << 30)) { + mem_put_le32(ans_.buf + ans_.buf_offset, (0x03u << 30u) + state); + return ans_.buf_offset + 4; + } else { + DRACO_DCHECK(0 && "State is too large to be serialized"); + return ans_.buf_offset; + } + } + + // rANS with normalization. + // sym->prob takes the place of l_s from the paper. + // rans_precision is m. + inline void rans_write(const struct rans_sym *const sym) { + const uint32_t p = sym->prob; + while (ans_.state >= l_rans_base / rans_precision * DRACO_ANS_IO_BASE * p) { + ans_.buf[ans_.buf_offset++] = ans_.state % DRACO_ANS_IO_BASE; + ans_.state /= DRACO_ANS_IO_BASE; + } + // TODO(ostava): The division and multiplication should be optimized. + ans_.state = + (ans_.state / p) * rans_precision + ans_.state % p + sym->cum_prob; + } + + private: + static constexpr int rans_precision = 1 << rans_precision_bits_t; + static constexpr int l_rans_base = rans_precision * 4; + AnsCoder ans_; +}; + +struct rans_dec_sym { + uint32_t val; + uint32_t prob; + uint32_t cum_prob; // not-inclusive. +}; + +// Class for performing rANS decoding using a desired number of precision bits. +// The number of precision bits needs to be the same as with the RAnsEncoder +// that was used to encode the input data. +template +class RAnsDecoder { + public: + RAnsDecoder() {} + + // Initializes the decoder from the input buffer. The |offset| specifies the + // number of bytes encoded by the encoder. A non zero return value is an + // error. + inline int read_init(const uint8_t *const buf, int offset) { + unsigned x; + if (offset < 1) { + return 1; + } + ans_.buf = buf; + x = buf[offset - 1] >> 6; + if (x == 0) { + ans_.buf_offset = offset - 1; + ans_.state = buf[offset - 1] & 0x3F; + } else if (x == 1) { + if (offset < 2) { + return 1; + } + ans_.buf_offset = offset - 2; + ans_.state = mem_get_le16(buf + offset - 2) & 0x3FFF; + } else if (x == 2) { + if (offset < 3) { + return 1; + } + ans_.buf_offset = offset - 3; + ans_.state = mem_get_le24(buf + offset - 3) & 0x3FFFFF; + } else if (x == 3) { + ans_.buf_offset = offset - 4; + ans_.state = mem_get_le32(buf + offset - 4) & 0x3FFFFFFF; + } else { + return 1; + } + ans_.state += l_rans_base; + if (ans_.state >= l_rans_base * DRACO_ANS_IO_BASE) { + return 1; + } + return 0; + } + + inline int read_end() { return ans_.state == l_rans_base; } + + inline int reader_has_error() { + return ans_.state < l_rans_base && ans_.buf_offset == 0; + } + + inline int rans_read() { + unsigned rem; + unsigned quo; + struct rans_dec_sym sym; + while (ans_.state < l_rans_base && ans_.buf_offset > 0) { + ans_.state = ans_.state * DRACO_ANS_IO_BASE + ans_.buf[--ans_.buf_offset]; + } + // |rans_precision| is a power of two compile time constant, and the below + // division and modulo are going to be optimized by the compiler. 
+ quo = ans_.state / rans_precision; + rem = ans_.state % rans_precision; + fetch_sym(&sym, rem); + ans_.state = quo * sym.prob + rem - sym.cum_prob; + return sym.val; + } + + // Construct a lookup table with |rans_precision| number of entries. + // Returns false if the table couldn't be built (because of wrong input data). + inline bool rans_build_look_up_table(const uint32_t token_probs[], + uint32_t num_symbols) { + lut_table_.resize(rans_precision); + probability_table_.resize(num_symbols); + uint32_t cum_prob = 0; + uint32_t act_prob = 0; + for (uint32_t i = 0; i < num_symbols; ++i) { + probability_table_[i].prob = token_probs[i]; + probability_table_[i].cum_prob = cum_prob; + cum_prob += token_probs[i]; + if (cum_prob > rans_precision) { + return false; + } + for (uint32_t j = act_prob; j < cum_prob; ++j) { + lut_table_[j] = i; + } + act_prob = cum_prob; + } + if (cum_prob != rans_precision) { + return false; + } + return true; + } + + private: + inline void fetch_sym(struct rans_dec_sym *out, uint32_t rem) { + uint32_t symbol = lut_table_[rem]; + out->val = symbol; + out->prob = probability_table_[symbol].prob; + out->cum_prob = probability_table_[symbol].cum_prob; + } + + static constexpr int rans_precision = 1 << rans_precision_bits_t; + static constexpr int l_rans_base = rans_precision * 4; + std::vector lut_table_; + std::vector probability_table_; + AnsDecoder ans_; +}; + +#undef DRACO_ANS_DIVREM +#undef DRACO_ANS_P8_PRECISION +#undef DRACO_ANS_L_BASE +#undef DRACO_ANS_IO_BASE + +} // namespace draco + +#endif // DRACO_COMPRESSION_ENTROPY_ANS_H_ diff --git a/contrib/draco/src/draco/compression/entropy/rans_symbol_coding.h b/contrib/draco/src/draco/compression/entropy/rans_symbol_coding.h new file mode 100644 index 000000000..cd4271193 --- /dev/null +++ b/contrib/draco/src/draco/compression/entropy/rans_symbol_coding.h @@ -0,0 +1,53 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// File providing shared functionality for RAnsSymbolEncoder and +// RAnsSymbolDecoder (see rans_symbol_encoder.h / rans_symbol_decoder.h). +#ifndef DRACO_COMPRESSION_ENTROPY_RANS_SYMBOL_CODING_H_ +#define DRACO_COMPRESSION_ENTROPY_RANS_SYMBOL_CODING_H_ + +#include "draco/compression/entropy/ans.h" + +namespace draco { + +// Computes the desired precision of the rANS method for the specified number of +// unique symbols the input data (defined by their bit_length). +constexpr int ComputeRAnsUnclampedPrecision(int symbols_bit_length) { + return (3 * symbols_bit_length) / 2; +} + +// Computes the desired precision clamped to guarantee a valid functionality of +// our rANS library (which is between 12 to 20 bits). +constexpr int ComputeRAnsPrecisionFromUniqueSymbolsBitLength( + int symbols_bit_length) { + return ComputeRAnsUnclampedPrecision(symbols_bit_length) < 12 ? 12 + : ComputeRAnsUnclampedPrecision(symbols_bit_length) > 20 + ? 
20 + : ComputeRAnsUnclampedPrecision(symbols_bit_length); +} + +// Compute approximate frequency table size needed for storing the provided +// symbols. +static inline int64_t ApproximateRAnsFrequencyTableBits( + int32_t max_value, int num_unique_symbols) { + // Approximate number of bits for storing zero frequency entries using the + // run length encoding (with max length of 64). + const int64_t table_zero_frequency_bits = + 8 * (num_unique_symbols + (max_value - num_unique_symbols) / 64); + return 8 * num_unique_symbols + table_zero_frequency_bits; +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ENTROPY_RANS_SYMBOL_CODING_H_ diff --git a/contrib/draco/src/draco/compression/entropy/rans_symbol_decoder.h b/contrib/draco/src/draco/compression/entropy/rans_symbol_decoder.h new file mode 100644 index 000000000..10cdc6781 --- /dev/null +++ b/contrib/draco/src/draco/compression/entropy/rans_symbol_decoder.h @@ -0,0 +1,164 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ENTROPY_RANS_SYMBOL_DECODER_H_ +#define DRACO_COMPRESSION_ENTROPY_RANS_SYMBOL_DECODER_H_ + +#include "draco/compression/config/compression_shared.h" +#include "draco/compression/entropy/rans_symbol_coding.h" +#include "draco/core/decoder_buffer.h" +#include "draco/core/varint_decoding.h" +#include "draco/draco_features.h" + +namespace draco { + +// A helper class for decoding symbols using the rANS algorithm (see ans.h). +// The class can be used to decode the probability table and the data encoded +// by the RAnsSymbolEncoder. |unique_symbols_bit_length_t| must be the same as +// the one used for the corresponding RAnsSymbolEncoder. +template +class RAnsSymbolDecoder { + public: + RAnsSymbolDecoder() : num_symbols_(0) {} + + // Initialize the decoder and decode the probability table. + bool Create(DecoderBuffer *buffer); + + uint32_t num_symbols() const { return num_symbols_; } + + // Starts decoding from the buffer. The buffer will be advanced past the + // encoded data after this call. + bool StartDecoding(DecoderBuffer *buffer); + uint32_t DecodeSymbol() { return ans_.rans_read(); } + void EndDecoding(); + + private: + static constexpr int rans_precision_bits_ = + ComputeRAnsPrecisionFromUniqueSymbolsBitLength( + unique_symbols_bit_length_t); + static constexpr int rans_precision_ = 1 << rans_precision_bits_; + + std::vector probability_table_; + uint32_t num_symbols_; + RAnsDecoder ans_; +}; + +template +bool RAnsSymbolDecoder::Create( + DecoderBuffer *buffer) { + // Check that the DecoderBuffer version is set. + if (buffer->bitstream_version() == 0) { + return false; + } + // Decode the number of alphabet symbols. 
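Underneath this decoder sit the RAnsEncoder and RAnsDecoder templates from ans.h. The sketch below is a self-contained round trip at precision 1 << 12 with a hand-built probability table; the symbol encoder later in this patch builds such tables automatically, and the reverse-order feeding shown here is why it asks callers for reversed input.

#include <cassert>
#include <cstdint>

#include "draco/compression/entropy/ans.h"

int main() {
  // Three symbols whose probabilities sum to the rANS precision 1 << 12.
  const uint32_t probs[3] = {2048, 1024, 1024};
  const draco::rans_sym table[3] = {
      {2048, 0}, {1024, 2048}, {1024, 3072}};  // {prob, cum_prob}
  const int input[6] = {0, 1, 2, 0, 0, 2};

  uint8_t buf[64];
  draco::RAnsEncoder<12> encoder;
  encoder.write_init(buf);
  for (int i = 5; i >= 0; --i) {  // last-in-first-out: feed in reverse
    encoder.rans_write(&table[input[i]]);
  }
  const int num_bytes = encoder.write_end();

  draco::RAnsDecoder<12> decoder;
  if (!decoder.rans_build_look_up_table(probs, 3) ||
      decoder.read_init(buf, num_bytes) != 0) {
    return 1;
  }
  for (int i = 0; i < 6; ++i) {
    const int symbol = decoder.rans_read();
    assert(symbol == input[i]);
    (void)symbol;
  }
  assert(decoder.read_end());
  return 0;
}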
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED + if (buffer->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 0)) { + if (!buffer->Decode(&num_symbols_)) { + return false; + } + + } else +#endif + { + if (!DecodeVarint(&num_symbols_, buffer)) { + return false; + } + } + probability_table_.resize(num_symbols_); + if (num_symbols_ == 0) { + return true; + } + // Decode the table. + for (uint32_t i = 0; i < num_symbols_; ++i) { + uint8_t prob_data = 0; + // Decode the first byte and extract the number of extra bytes we need to + // get, or the offset to the next symbol with non-zero probability. + if (!buffer->Decode(&prob_data)) { + return false; + } + // Token is stored in the first two bits of the first byte. Values 0-2 are + // used to indicate the number of extra bytes, and value 3 is a special + // symbol used to denote run-length coding of zero probability entries. + // See rans_symbol_encoder.h for more details. + const int token = prob_data & 3; + if (token == 3) { + const uint32_t offset = prob_data >> 2; + if (i + offset >= num_symbols_) { + return false; + } + // Set zero probability for all symbols in the specified range. + for (uint32_t j = 0; j < offset + 1; ++j) { + probability_table_[i + j] = 0; + } + i += offset; + } else { + const int extra_bytes = token; + uint32_t prob = prob_data >> 2; + for (int b = 0; b < extra_bytes; ++b) { + uint8_t eb; + if (!buffer->Decode(&eb)) { + return false; + } + // Shift 8 bits for each extra byte and subtract 2 for the two first + // bits. + prob |= static_cast(eb) << (8 * (b + 1) - 2); + } + probability_table_[i] = prob; + } + } + if (!ans_.rans_build_look_up_table(&probability_table_[0], num_symbols_)) { + return false; + } + return true; +} + +template +bool RAnsSymbolDecoder::StartDecoding( + DecoderBuffer *buffer) { + uint64_t bytes_encoded; + // Decode the number of bytes encoded by the encoder. +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED + if (buffer->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 0)) { + if (!buffer->Decode(&bytes_encoded)) { + return false; + } + + } else +#endif + { + if (!DecodeVarint(&bytes_encoded, buffer)) { + return false; + } + } + if (bytes_encoded > static_cast(buffer->remaining_size())) { + return false; + } + const uint8_t *const data_head = + reinterpret_cast(buffer->data_head()); + // Advance the buffer past the rANS data. + buffer->Advance(bytes_encoded); + if (ans_.read_init(data_head, static_cast(bytes_encoded)) != 0) { + return false; + } + return true; +} + +template +void RAnsSymbolDecoder::EndDecoding() { + ans_.read_end(); +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ENTROPY_RANS_SYMBOL_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/entropy/rans_symbol_encoder.h b/contrib/draco/src/draco/compression/entropy/rans_symbol_encoder.h new file mode 100644 index 000000000..4e07ec871 --- /dev/null +++ b/contrib/draco/src/draco/compression/entropy/rans_symbol_encoder.h @@ -0,0 +1,290 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ENTROPY_RANS_SYMBOL_ENCODER_H_ +#define DRACO_COMPRESSION_ENTROPY_RANS_SYMBOL_ENCODER_H_ + +#include +#include +#include + +#include "draco/compression/entropy/ans.h" +#include "draco/compression/entropy/rans_symbol_coding.h" +#include "draco/core/encoder_buffer.h" +#include "draco/core/varint_encoding.h" + +namespace draco { + +// A helper class for encoding symbols using the rANS algorithm (see ans.h). +// The class can be used to initialize and encode probability table needed by +// rANS, and to perform encoding of symbols into the provided EncoderBuffer. +template +class RAnsSymbolEncoder { + public: + RAnsSymbolEncoder() + : num_symbols_(0), num_expected_bits_(0), buffer_offset_(0) {} + + // Creates a probability table needed by the rANS library and encode it into + // the provided buffer. + bool Create(const uint64_t *frequencies, int num_symbols, + EncoderBuffer *buffer); + + void StartEncoding(EncoderBuffer *buffer); + void EncodeSymbol(uint32_t symbol) { + ans_.rans_write(&probability_table_[symbol]); + } + void EndEncoding(EncoderBuffer *buffer); + + // rANS requires to encode the input symbols in the reverse order. + static constexpr bool needs_reverse_encoding() { return true; } + + private: + // Functor used for sorting symbol ids according to their probabilities. + // The functor sorts symbol indices that index an underlying map between + // symbol ids and their probabilities. We don't sort the probability table + // directly, because that would require an additional indirection during the + // EncodeSymbol() function. + struct ProbabilityLess { + explicit ProbabilityLess(const std::vector *probs) + : probabilities(probs) {} + bool operator()(int i, int j) const { + return probabilities->at(i).prob < probabilities->at(j).prob; + } + const std::vector *probabilities; + }; + + // Encodes the probability table into the output buffer. + bool EncodeTable(EncoderBuffer *buffer); + + static constexpr int rans_precision_bits_ = + ComputeRAnsPrecisionFromUniqueSymbolsBitLength( + unique_symbols_bit_length_t); + static constexpr int rans_precision_ = 1 << rans_precision_bits_; + + std::vector probability_table_; + // The number of symbols in the input alphabet. + uint32_t num_symbols_; + // Expected number of bits that is needed to encode the input. + uint64_t num_expected_bits_; + + RAnsEncoder ans_; + // Initial offset of the encoder buffer before any ans data was encoded. + uint64_t buffer_offset_; +}; + +template +bool RAnsSymbolEncoder::Create( + const uint64_t *frequencies, int num_symbols, EncoderBuffer *buffer) { + // Compute the total of the input frequencies. + uint64_t total_freq = 0; + int max_valid_symbol = 0; + for (int i = 0; i < num_symbols; ++i) { + total_freq += frequencies[i]; + if (frequencies[i] > 0) { + max_valid_symbol = i; + } + } + num_symbols = max_valid_symbol + 1; + num_symbols_ = num_symbols; + probability_table_.resize(num_symbols); + const double total_freq_d = static_cast(total_freq); + const double rans_precision_d = static_cast(rans_precision_); + // Compute probabilities by rescaling the normalized frequencies into interval + // [1, rans_precision - 1]. The total probability needs to be equal to + // rans_precision. + int total_rans_prob = 0; + for (int i = 0; i < num_symbols; ++i) { + const uint64_t freq = frequencies[i]; + + // Normalized probability. 
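+ // Illustrative numbers (not from the upstream sources): with
+ // rans_precision_ = 4096, a symbol seen 30 times out of 100 has prob = 0.3
+ // and the quantized value computed below is
+ // static_cast<uint32_t>(0.3 * 4096 + 0.5) = 1229; any non-zero frequency
+ // that would round to 0 is bumped up to 1 so the symbol stays decodable.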
+ const double prob = static_cast(freq) / total_freq_d; + + // RAns probability in range of [1, rans_precision - 1]. + uint32_t rans_prob = static_cast(prob * rans_precision_d + 0.5f); + if (rans_prob == 0 && freq > 0) { + rans_prob = 1; + } + probability_table_[i].prob = rans_prob; + total_rans_prob += rans_prob; + } + // Because of rounding errors, the total precision may not be exactly accurate + // and we may need to adjust the entries a little bit. + if (total_rans_prob != rans_precision_) { + std::vector sorted_probabilities(num_symbols); + for (int i = 0; i < num_symbols; ++i) { + sorted_probabilities[i] = i; + } + std::sort(sorted_probabilities.begin(), sorted_probabilities.end(), + ProbabilityLess(&probability_table_)); + if (total_rans_prob < rans_precision_) { + // This happens rather infrequently, just add the extra needed precision + // to the most frequent symbol. + probability_table_[sorted_probabilities.back()].prob += + rans_precision_ - total_rans_prob; + } else { + // We have over-allocated the precision, which is quite common. + // Rescale the probabilities of all symbols. + int32_t error = total_rans_prob - rans_precision_; + while (error > 0) { + const double act_total_prob_d = static_cast(total_rans_prob); + const double act_rel_error_d = rans_precision_d / act_total_prob_d; + for (int j = num_symbols - 1; j > 0; --j) { + int symbol_id = sorted_probabilities[j]; + if (probability_table_[symbol_id].prob <= 1) { + if (j == num_symbols - 1) { + return false; // Most frequent symbol would be empty. + } + break; + } + const int32_t new_prob = static_cast( + floor(act_rel_error_d * + static_cast(probability_table_[symbol_id].prob))); + int32_t fix = probability_table_[symbol_id].prob - new_prob; + if (fix == 0u) { + fix = 1; + } + if (fix >= static_cast(probability_table_[symbol_id].prob)) { + fix = probability_table_[symbol_id].prob - 1; + } + if (fix > error) { + fix = error; + } + probability_table_[symbol_id].prob -= fix; + total_rans_prob -= fix; + error -= fix; + if (total_rans_prob == rans_precision_) { + break; + } + } + } + } + } + + // Compute the cumulative probability (cdf). + uint32_t total_prob = 0; + for (int i = 0; i < num_symbols; ++i) { + probability_table_[i].cum_prob = total_prob; + total_prob += probability_table_[i].prob; + } + if (total_prob != rans_precision_) { + return false; + } + + // Estimate the number of bits needed to encode the input. + // From Shannon entropy the total number of bits N is: + // N = -sum{i : all_symbols}(F(i) * log2(P(i))) + // where P(i) is the normalized probability of symbol i and F(i) is the + // symbol's frequency in the input data. + double num_bits = 0; + for (int i = 0; i < num_symbols; ++i) { + if (probability_table_[i].prob == 0) { + continue; + } + const double norm_prob = + static_cast(probability_table_[i].prob) / rans_precision_d; + num_bits += static_cast(frequencies[i]) * log2(norm_prob); + } + num_expected_bits_ = static_cast(ceil(-num_bits)); + if (!EncodeTable(buffer)) { + return false; + } + return true; +} + +template +bool RAnsSymbolEncoder::EncodeTable( + EncoderBuffer *buffer) { + EncodeVarint(num_symbols_, buffer); + // Use varint encoding for the probabilities (first two bits represent the + // number of bytes used - 1). 
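+ // Worked example (illustrative, not from the upstream sources): a
+ // probability of 300 needs one extra byte (300 >= 1 << 6), so the loop below
+ // writes (300 << 2) | 1 truncated to 8 bits (0xB1) followed by 300 >> 6 = 4,
+ // and the decoder rebuilds (0xB1 >> 2) | (4 << 6) = 44 | 256 = 300. A run of
+ // five zero-probability symbols is written as the single byte (4 << 2) | 3.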
+ for (uint32_t i = 0; i < num_symbols_; ++i) { + const uint32_t prob = probability_table_[i].prob; + int num_extra_bytes = 0; + if (prob >= (1 << 6)) { + num_extra_bytes++; + if (prob >= (1 << 14)) { + num_extra_bytes++; + if (prob >= (1 << 22)) { + // The maximum number of precision bits is 20 so we should not really + // get to this point. + return false; + } + } + } + if (prob == 0) { + // When the probability of the symbol is 0, set the first two bits to 1 + // (unique identifier) and use the remaining 6 bits to store the offset + // to the next symbol with non-zero probability. + uint32_t offset = 0; + for (; offset < (1 << 6) - 1; ++offset) { + // Note: we don't have to check whether the next symbol id is larger + // than num_symbols_ because we know that the last symbol always has + // non-zero probability. + const uint32_t next_prob = probability_table_[i + offset + 1].prob; + if (next_prob > 0) { + break; + } + } + buffer->Encode(static_cast((offset << 2) | 3)); + i += offset; + } else { + // Encode the first byte (including the number of extra bytes). + buffer->Encode(static_cast((prob << 2) | (num_extra_bytes & 3))); + // Encode the extra bytes. + for (int b = 0; b < num_extra_bytes; ++b) { + buffer->Encode(static_cast(prob >> (8 * (b + 1) - 2))); + } + } + } + return true; +} + +template +void RAnsSymbolEncoder::StartEncoding( + EncoderBuffer *buffer) { + // Allocate extra storage just in case. + const uint64_t required_bits = 2 * num_expected_bits_ + 32; + + buffer_offset_ = buffer->size(); + const int64_t required_bytes = (required_bits + 7) / 8; + buffer->Resize(buffer_offset_ + required_bytes + sizeof(buffer_offset_)); + uint8_t *const data = + reinterpret_cast(const_cast(buffer->data())); + ans_.write_init(data + buffer_offset_); +} + +template +void RAnsSymbolEncoder::EndEncoding( + EncoderBuffer *buffer) { + char *const src = const_cast(buffer->data()) + buffer_offset_; + + // TODO(fgalligan): Look into changing this to uint32_t as write_end() + // returns an int. + const uint64_t bytes_written = static_cast(ans_.write_end()); + EncoderBuffer var_size_buffer; + EncodeVarint(bytes_written, &var_size_buffer); + const uint32_t size_len = static_cast(var_size_buffer.size()); + char *const dst = src + size_len; + memmove(dst, src, bytes_written); + + // Store the size of the encoded data. + memcpy(src, var_size_buffer.data(), size_len); + + // Resize the buffer to match the number of encoded bytes. + buffer->Resize(buffer_offset_ + bytes_written + size_len); +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ENTROPY_RANS_SYMBOL_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/entropy/shannon_entropy.cc b/contrib/draco/src/draco/compression/entropy/shannon_entropy.cc new file mode 100644 index 000000000..137eafe5f --- /dev/null +++ b/contrib/draco/src/draco/compression/entropy/shannon_entropy.cc @@ -0,0 +1,147 @@ +#include "draco/compression/entropy/shannon_entropy.h" + +#include +#include + +#include "draco/compression/entropy/rans_symbol_coding.h" + +namespace draco { + +int64_t ComputeShannonEntropy(const uint32_t *symbols, int num_symbols, + int max_value, int *out_num_unique_symbols) { + // First find frequency of all unique symbols in the input array. 
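+ // Worked example (illustrative, not from the upstream sources): for the
+ // input {0, 0, 1, 2} the frequencies are {2, 1, 1}, the per-symbol entropy
+ // is -(0.5 * log2(0.5) + 2 * 0.25 * log2(0.25)) = 1.5 bits, and the function
+ // returns 4 * 1.5 = 6 bits for the whole array.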
+ int num_unique_symbols = 0; + std::vector symbol_frequencies(max_value + 1, 0); + for (int i = 0; i < num_symbols; ++i) { + ++symbol_frequencies[symbols[i]]; + } + double total_bits = 0; + double num_symbols_d = num_symbols; + for (int i = 0; i < max_value + 1; ++i) { + if (symbol_frequencies[i] > 0) { + ++num_unique_symbols; + // Compute Shannon entropy for the symbol. + // We don't want to use std::log2 here for Android build. + total_bits += + symbol_frequencies[i] * + log2(static_cast(symbol_frequencies[i]) / num_symbols_d); + } + } + if (out_num_unique_symbols) { + *out_num_unique_symbols = num_unique_symbols; + } + // Entropy is always negative. + return static_cast(-total_bits); +} + +double ComputeBinaryShannonEntropy(uint32_t num_values, + uint32_t num_true_values) { + if (num_values == 0) { + return 0; + } + + // We can exit early if the data set has 0 entropy. + if (num_true_values == 0 || num_values == num_true_values) { + return 0; + } + const double true_freq = + static_cast(num_true_values) / static_cast(num_values); + const double false_freq = 1.0 - true_freq; + return -(true_freq * std::log2(true_freq) + + false_freq * std::log2(false_freq)); +} + +ShannonEntropyTracker::ShannonEntropyTracker() {} + +ShannonEntropyTracker::EntropyData ShannonEntropyTracker::Peek( + const uint32_t *symbols, int num_symbols) { + return UpdateSymbols(symbols, num_symbols, false); +} + +ShannonEntropyTracker::EntropyData ShannonEntropyTracker::Push( + const uint32_t *symbols, int num_symbols) { + return UpdateSymbols(symbols, num_symbols, true); +} + +ShannonEntropyTracker::EntropyData ShannonEntropyTracker::UpdateSymbols( + const uint32_t *symbols, int num_symbols, bool push_changes) { + EntropyData ret_data = entropy_data_; + ret_data.num_values += num_symbols; + for (int i = 0; i < num_symbols; ++i) { + const uint32_t symbol = symbols[i]; + if (frequencies_.size() <= symbol) { + frequencies_.resize(symbol + 1, 0); + } + + // Update the entropy of the stream. Note that entropy of |N| values + // represented by |S| unique symbols is defined as: + // + // entropy = -sum_over_S(symbol_frequency / N * log2(symbol_frequency / N)) + // + // To avoid the need to recompute the entire sum when new values are added, + // we can instead update a so called entropy norm that is defined as: + // + // entropy_norm = sum_over_S(symbol_frequency * log2(symbol_frequency)) + // + // In this case, all we need to do is update entries on the symbols where + // the frequency actually changed. + // + // Note that entropy_norm and entropy can be easily transformed to the + // actual entropy as: + // + // entropy = log2(N) - entropy_norm / N + // + double old_symbol_entropy_norm = 0; + int &frequency = frequencies_[symbol]; + if (frequency > 1) { + old_symbol_entropy_norm = frequency * std::log2(frequency); + } else if (frequency == 0) { + ret_data.num_unique_symbols++; + if (symbol > static_cast(ret_data.max_symbol)) { + ret_data.max_symbol = symbol; + } + } + frequency++; + const double new_symbol_entropy_norm = frequency * std::log2(frequency); + + // Update the final entropy. + ret_data.entropy_norm += new_symbol_entropy_norm - old_symbol_entropy_norm; + } + if (push_changes) { + // Update entropy data of the stream. + entropy_data_ = ret_data; + } else { + // We are only peeking so do not update the stream. + // Revert changes in the frequency table. 
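+ // Illustrative numbers (not from the upstream sources): for a stream
+ // {7, 7, 9} the frequencies are {2, 1}, entropy_norm = 2 * log2(2) = 2, and
+ // GetNumberOfDataBits() below reports ceil(3 * log2(3) - 2) = 3 bits. Peek()
+ // produces the same figures but then undoes the frequency updates here.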
+ for (int i = 0; i < num_symbols; ++i) { + const uint32_t symbol = symbols[i]; + frequencies_[symbol]--; + } + } + return ret_data; +} + +int64_t ShannonEntropyTracker::GetNumberOfDataBits( + const EntropyData &entropy_data) { + if (entropy_data.num_values < 2) { + return 0; + } + // We need to compute the number of bits required to represent the stream + // using the entropy norm. Note that: + // + // entropy = log2(num_values) - entropy_norm / num_values + // + // and number of bits required for the entropy is: num_values * entropy + // + return static_cast( + ceil(entropy_data.num_values * std::log2(entropy_data.num_values) - + entropy_data.entropy_norm)); +} + +int64_t ShannonEntropyTracker::GetNumberOfRAnsTableBits( + const EntropyData &entropy_data) { + return ApproximateRAnsFrequencyTableBits(entropy_data.max_symbol + 1, + entropy_data.num_unique_symbols); +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/entropy/shannon_entropy.h b/contrib/draco/src/draco/compression/entropy/shannon_entropy.h new file mode 100644 index 000000000..85165f4cb --- /dev/null +++ b/contrib/draco/src/draco/compression/entropy/shannon_entropy.h @@ -0,0 +1,110 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ENTROPY_SHANNON_ENTROPY_H_ +#define DRACO_COMPRESSION_ENTROPY_SHANNON_ENTROPY_H_ + +#include + +#include + +namespace draco { + +// Computes an approximate Shannon entropy of symbols stored in the provided +// input array |symbols|. The entropy corresponds to the number of bits that is +// required to represent/store all the symbols using an optimal entropy coding +// algorithm. See for example "A mathematical theory of communication" by +// Shannon'48 (http://ieeexplore.ieee.org/document/6773024/). +// +// |max_value| is a required input that define the maximum value in the input +// |symbols| array. +// +// |out_num_unique_symbols| is an optional output argument that stores the +// number of unique symbols contained within the |symbols| array. +// TODO(ostava): This should be renamed or the return value should be changed to +// return the actual entropy and not the number of bits needed to represent the +// input symbols. +int64_t ComputeShannonEntropy(const uint32_t *symbols, int num_symbols, + int max_value, int *out_num_unique_symbols); + +// Computes the Shannon entropy of |num_values| Boolean entries, where +// |num_true_values| are set to true. +// Returns entropy between 0-1. +double ComputeBinaryShannonEntropy(uint32_t num_values, + uint32_t num_true_values); + +// Class that can be used to keep track of the Shannon entropy on streamed data. +// As new symbols are pushed to the tracker, the entropy is automatically +// recomputed. The class also support recomputing the entropy without actually +// pushing the symbols to the tracker through the Peek() method. 
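+//
+// Example usage (editorial sketch, not part of the upstream documentation):
+//
+//   draco::ShannonEntropyTracker tracker;
+//   const uint32_t symbols[] = {0, 0, 1, 2};
+//   tracker.Push(symbols, 4);
+//   const int64_t data_bits = tracker.GetNumberOfDataBits();
+//   const int64_t table_bits = tracker.GetNumberOfRAnsTableBits();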
+class ShannonEntropyTracker { + public: + ShannonEntropyTracker(); + + // Struct for holding entropy data about the symbols added to the tracker. + // It can be used to compute the number of bits needed to store the data using + // the method: + // ShannonEntropyTracker::GetNumberOfDataBits(entropy_data); + // or to compute the approximate size of the frequency table needed by the + // rans coding using method: + // ShannonEntropyTracker::GetNumberOfRAnsTableBits(entropy_data); + struct EntropyData { + double entropy_norm; + int num_values; + int max_symbol; + int num_unique_symbols; + EntropyData() + : entropy_norm(0.0), + num_values(0), + max_symbol(0), + num_unique_symbols(0) {} + }; + + // Adds new symbols to the tracker and recomputes the entropy accordingly. + EntropyData Push(const uint32_t *symbols, int num_symbols); + + // Returns new entropy data for the tracker as if |symbols| were added to the + // tracker without actually changing the status of the tracker. + EntropyData Peek(const uint32_t *symbols, int num_symbols); + + // Gets the number of bits needed for encoding symbols added to the tracker. + int64_t GetNumberOfDataBits() const { + return GetNumberOfDataBits(entropy_data_); + } + + // Gets the number of bits needed for encoding frequency table using the rans + // encoder. + int64_t GetNumberOfRAnsTableBits() const { + return GetNumberOfRAnsTableBits(entropy_data_); + } + + // Gets the number of bits needed for encoding given |entropy_data|. + static int64_t GetNumberOfDataBits(const EntropyData &entropy_data); + + // Gets the number of bits needed for encoding frequency table using the rans + // encoder for the given |entropy_data|. + static int64_t GetNumberOfRAnsTableBits(const EntropyData &entropy_data); + + private: + EntropyData UpdateSymbols(const uint32_t *symbols, int num_symbols, + bool push_changes); + + std::vector frequencies_; + + EntropyData entropy_data_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ENTROPY_SHANNON_ENTROPY_H_ diff --git a/contrib/draco/src/draco/compression/entropy/shannon_entropy_test.cc b/contrib/draco/src/draco/compression/entropy/shannon_entropy_test.cc new file mode 100644 index 000000000..732c7d2fb --- /dev/null +++ b/contrib/draco/src/draco/compression/entropy/shannon_entropy_test.cc @@ -0,0 +1,58 @@ +#include "draco/compression/entropy/shannon_entropy.h" + +#include "draco/core/draco_test_base.h" + +namespace { + +TEST(ShannonEntropyTest, TestBinaryEntropy) { + // Test verifies that computing binary entropy works as expected. + ASSERT_EQ(draco::ComputeBinaryShannonEntropy(0, 0), 0); + ASSERT_EQ(draco::ComputeBinaryShannonEntropy(10, 0), 0); + ASSERT_EQ(draco::ComputeBinaryShannonEntropy(10, 10), 0); + ASSERT_NEAR(draco::ComputeBinaryShannonEntropy(10, 5), 1.0, 1e-4); +} + +TEST(ShannonEntropyTest, TestStreamEntropy) { + // Test verifies that the entropy of streamed data is computed correctly. + const std::vector symbols = {1, 5, 1, 100, 2, 1}; + + draco::ShannonEntropyTracker entropy_tracker; + + // Nothing added, 0 entropy. + ASSERT_EQ(entropy_tracker.GetNumberOfDataBits(), 0); + + // Try to push symbols one by one. + uint32_t max_symbol = 0; + for (int i = 0; i < symbols.size(); ++i) { + if (symbols[i] > max_symbol) { + max_symbol = symbols[i]; + } + const auto entropy_data = entropy_tracker.Push(&symbols[i], 1); + + const int64_t stream_entropy_bits = entropy_tracker.GetNumberOfDataBits(); + // Ensure the returned entropy_data is in sync with the stream. 
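+ // Illustrative numbers (not from the upstream sources): after the third
+ // push the stream is {1, 5, 1}, so entropy_norm = 2 * log2(2) = 2 and both
+ // sides of this check evaluate to ceil(3 * log2(3) - 2) = 3 bits.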
+ ASSERT_EQ(draco::ShannonEntropyTracker::GetNumberOfDataBits(entropy_data), + stream_entropy_bits); + + // Make sure the entropy is approximately the same as the one we compute + // directly from all symbols. + const int64_t expected_entropy_bits = draco::ComputeShannonEntropy( + symbols.data(), i + 1, max_symbol, nullptr); + + // For now hardcoded tolerance of 2 bits. + ASSERT_NEAR(expected_entropy_bits, stream_entropy_bits, 2); + } + + // Compare it also to the case when we add all symbols in one call. + draco::ShannonEntropyTracker entropy_tracker_2; + entropy_tracker_2.Push(symbols.data(), symbols.size()); + const int64_t stream_2_entropy_bits = entropy_tracker_2.GetNumberOfDataBits(); + ASSERT_EQ(entropy_tracker.GetNumberOfDataBits(), stream_2_entropy_bits); + + // Ensure that peeking does not change the entropy. + entropy_tracker_2.Peek(symbols.data(), 1); + + ASSERT_EQ(stream_2_entropy_bits, entropy_tracker_2.GetNumberOfDataBits()); +} + +} // namespace diff --git a/contrib/draco/src/draco/compression/entropy/symbol_coding_test.cc b/contrib/draco/src/draco/compression/entropy/symbol_coding_test.cc new file mode 100644 index 000000000..ba7166bbe --- /dev/null +++ b/contrib/draco/src/draco/compression/entropy/symbol_coding_test.cc @@ -0,0 +1,170 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/config/compression_shared.h" +#include "draco/compression/entropy/symbol_decoding.h" +#include "draco/compression/entropy/symbol_encoding.h" +#include "draco/core/bit_utils.h" +#include "draco/core/decoder_buffer.h" +#include "draco/core/draco_test_base.h" +#include "draco/core/encoder_buffer.h" + +namespace draco { + +class SymbolCodingTest : public ::testing::Test { + protected: + SymbolCodingTest() : bitstream_version_(kDracoMeshBitstreamVersion) {} + + template + void TestConvertToSymbolAndBack(SignedIntTypeT x) { + typedef typename std::make_unsigned::type Symbol; + Symbol symbol = ConvertSignedIntToSymbol(x); + SignedIntTypeT y = ConvertSymbolToSignedInt(symbol); + ASSERT_EQ(x, y); + } + + uint16_t bitstream_version_; +}; + +TEST_F(SymbolCodingTest, TestLargeNumbers) { + // This test verifies that SymbolCoding successfully encodes an array of large + // numbers. + const uint32_t in[] = {12345678, 1223333, 111, 5}; + const int num_values = sizeof(in) / sizeof(uint32_t); + EncoderBuffer eb; + ASSERT_TRUE(EncodeSymbols(in, num_values, 1, nullptr, &eb)); + + std::vector out; + out.resize(num_values); + DecoderBuffer db; + db.Init(eb.data(), eb.size()); + db.set_bitstream_version(bitstream_version_); + ASSERT_TRUE(DecodeSymbols(num_values, 1, &db, &out[0])); + for (int i = 0; i < num_values; ++i) { + EXPECT_EQ(in[i], out[i]); + } +} + +TEST_F(SymbolCodingTest, TestManyNumbers) { + // This test verifies that SymbolCoding successfully encodes an array of + // several numbers that repeat many times. + + // Value/frequency pairs. 
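+ // Editorial clarification: each pair is {value, repetition count}, e.g.
+ // {12, 1500} expands to 1500 copies of the symbol 12 in |in_values| below.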
+ const std::pair in[] = { + {12, 1500}, {1025, 31000}, {7, 1}, {9, 5}, {0, 6432}}; + + const int num_pairs = sizeof(in) / sizeof(std::pair); + + std::vector in_values; + for (int i = 0; i < num_pairs; ++i) { + in_values.insert(in_values.end(), in[i].second, in[i].first); + } + for (int method = 0; method < NUM_SYMBOL_CODING_METHODS; ++method) { + // Test the encoding using all available symbol coding methods. + Options options; + SetSymbolEncodingMethod(&options, static_cast(method)); + + EncoderBuffer eb; + ASSERT_TRUE( + EncodeSymbols(in_values.data(), in_values.size(), 1, &options, &eb)); + std::vector out_values; + out_values.resize(in_values.size()); + DecoderBuffer db; + db.Init(eb.data(), eb.size()); + db.set_bitstream_version(bitstream_version_); + ASSERT_TRUE(DecodeSymbols(in_values.size(), 1, &db, &out_values[0])); + for (uint32_t i = 0; i < in_values.size(); ++i) { + ASSERT_EQ(in_values[i], out_values[i]); + } + } +} + +TEST_F(SymbolCodingTest, TestEmpty) { + // This test verifies that SymbolCoding successfully encodes an empty array. + EncoderBuffer eb; + ASSERT_TRUE(EncodeSymbols(nullptr, 0, 1, nullptr, &eb)); + DecoderBuffer db; + db.Init(eb.data(), eb.size()); + db.set_bitstream_version(bitstream_version_); + ASSERT_TRUE(DecodeSymbols(0, 1, &db, nullptr)); +} + +TEST_F(SymbolCodingTest, TestOneSymbol) { + // This test verifies that SymbolCoding successfully encodes an a single + // symbol. + EncoderBuffer eb; + const std::vector in(1200, 0); + ASSERT_TRUE(EncodeSymbols(in.data(), in.size(), 1, nullptr, &eb)); + + std::vector out(in.size()); + DecoderBuffer db; + db.Init(eb.data(), eb.size()); + db.set_bitstream_version(bitstream_version_); + ASSERT_TRUE(DecodeSymbols(in.size(), 1, &db, &out[0])); + for (uint32_t i = 0; i < in.size(); ++i) { + ASSERT_EQ(in[i], out[i]); + } +} + +TEST_F(SymbolCodingTest, TestBitLengths) { + // This test verifies that SymbolCoding successfully encodes symbols of + // various bit lengths + EncoderBuffer eb; + std::vector in; + constexpr int bit_lengths = 18; + for (int i = 0; i < bit_lengths; ++i) { + in.push_back(1 << i); + } + std::vector out(in.size()); + for (int i = 0; i < bit_lengths; ++i) { + eb.Clear(); + ASSERT_TRUE(EncodeSymbols(in.data(), i + 1, 1, nullptr, &eb)); + DecoderBuffer db; + db.Init(eb.data(), eb.size()); + db.set_bitstream_version(bitstream_version_); + ASSERT_TRUE(DecodeSymbols(i + 1, 1, &db, &out[0])); + for (int j = 0; j < i + 1; ++j) { + ASSERT_EQ(in[j], out[j]); + } + } +} + +TEST_F(SymbolCodingTest, TestLargeNumberCondition) { + // This test verifies that SymbolCoding successfully encodes large symbols + // that are on the boundary between raw scheme and tagged scheme (18 bits). 
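+ // Editorial aside: 1 << 18 = 262144 needs 19 significant bits, which is why
+ // this input sits on the scheme-selection boundary mentioned above; since
+ // every symbol is identical, the stream itself carries zero entropy.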
+ EncoderBuffer eb; + constexpr int num_symbols = 1000000; + const std::vector in(num_symbols, 1 << 18); + ASSERT_TRUE(EncodeSymbols(in.data(), in.size(), 1, nullptr, &eb)); + + std::vector out(in.size()); + DecoderBuffer db; + db.Init(eb.data(), eb.size()); + db.set_bitstream_version(bitstream_version_); + ASSERT_TRUE(DecodeSymbols(in.size(), 1, &db, &out[0])); + for (uint32_t i = 0; i < in.size(); ++i) { + ASSERT_EQ(in[i], out[i]); + } +} + +TEST_F(SymbolCodingTest, TestConversionFullRange) { + TestConvertToSymbolAndBack(static_cast(-128)); + TestConvertToSymbolAndBack(static_cast(-127)); + TestConvertToSymbolAndBack(static_cast(-1)); + TestConvertToSymbolAndBack(static_cast(0)); + TestConvertToSymbolAndBack(static_cast(1)); + TestConvertToSymbolAndBack(static_cast(127)); +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/entropy/symbol_decoding.cc b/contrib/draco/src/draco/compression/entropy/symbol_decoding.cc new file mode 100644 index 000000000..93d29971c --- /dev/null +++ b/contrib/draco/src/draco/compression/entropy/symbol_decoding.cc @@ -0,0 +1,181 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/entropy/symbol_decoding.h" + +#include +#include + +#include "draco/compression/entropy/rans_symbol_decoder.h" + +namespace draco { + +template