Browse Source

kettle job perday

root 5 years ago
parent
commit
c8bd89a89f
5 changed files with 1065 additions and 538 deletions
  1. 9 9
      mem_Order_etl.ktr
  2. 1029 0
      history/mem_Order_product_etl.ktr
  3. 0 0
      history/mem_memberterminal.ktr
  4. 0 517
      mem_order_page.kjb
  5. 27 12
      perday/mem_Order_perday_etl.ktr

+ 9 - 9
mem_Order_etl.ktr

@@ -408,7 +408,7 @@
       <offset>0.0</offset>
       <maxdiff>0.0</maxdiff>
     </maxdate>
-    <size_rowset>1</size_rowset>
+    <size_rowset>2</size_rowset>
     <sleep_time_empty>50</sleep_time_empty>
     <sleep_time_full>50</sleep_time_full>
     <unique_connections>N</unique_connections>
@@ -524,15 +524,15 @@
     <description/>
     <distribute>Y</distribute>
     <custom_distribution/>
-    <copies>20</copies>
+    <copies>1</copies>
     <partitioning>
       <method>none</method>
       <schema_name/>
     </partitioning>
     <general>
-      <index>crm_order</index>
-      <type>_doc?routing=%{MemKey}</type>
-      <batchSize>3</batchSize>
+      <index>crm_order_routingphone</index>
+      <type>_doc?routing=%{MemPhone}</type>
+      <batchSize>100</batchSize>
       <timeout>100</timeout>
       <timeoutUnit>SECONDS</timeoutUnit>
       <isJson>N</isJson>
@@ -1133,8 +1133,8 @@ SELECT
 , ConfirmId	
 FROM Orders a
 where 1=1 and (AconsigneePhone1!='' or AconsigneePhone2!='')
---order by AConsigneePhone2 asc 
- --offset 0 rows fetch NEXT  2 rows only
+order by AConsigneePhone2 asc 
+ --offset 4600000 rows --fetch NEXT  2 rows only
  
 --)a
 --left join MemberData..DataImport as b on a.MemKey = b.Id
@@ -1149,9 +1149,9 @@ where 1=1 and (AconsigneePhone1!='' or AconsigneePhone2!='')
 --1=1  --and b.DataId is not null 
 --and MemKey >((${page}-1)*${pagesize})  and MemKey &lt;= ((${page}-1)*${pagesize} + (${pagesize})) 
 --and MemKey >0  and MemKey &lt;= 10
-order by AConsigneePhone2 asc 
+--order by AConsigneePhone2 asc 
 --offset ((${page}-1)*${pagesize}) rows fetch next (${pagesize}) rows only
-offset ((389-1)*5000) rows --fetch next (5000) rows only
+--offset ((389-1)*5000) rows --fetch next (5000) rows only
 </sql>
     <limit>0</limit>
     <lookup/>

File diff suppressed because it is too large
+ 1029 - 0
history/mem_Order_product_etl.ktr


mem_memberterminal.ktr → history/mem_memberterminal.ktr


File diff suppressed because it is too large
+ 0 - 517
mem_order_page.kjb


+ 27 - 12
perday/mem_Order_perday_etl.ktr

@@ -1,7 +1,7 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <transformation>
   <info>
-    <name>mem_Order_perday_etl</name>
+    <name>mem_Order_etl</name>
     <description/>
     <extended_description/>
     <trans_version/>
@@ -408,7 +408,7 @@
       <offset>0.0</offset>
       <maxdiff>0.0</maxdiff>
     </maxdate>
-    <size_rowset>1</size_rowset>
+    <size_rowset>2</size_rowset>
     <sleep_time_empty>50</sleep_time_empty>
     <sleep_time_full>50</sleep_time_full>
     <unique_connections>N</unique_connections>
@@ -449,6 +449,14 @@
     <data_tablespace/>
     <index_tablespace/>
     <attributes>
+      <attribute>
+        <code>EXTRA_OPTION_MSSQLNATIVE.defaultRowPrefetch</code>
+        <attribute>200</attribute>
+      </attribute>
+      <attribute>
+        <code>EXTRA_OPTION_MSSQLNATIVE.readTimeout</code>
+        <attribute>60</attribute>
+      </attribute>
       <attribute>
         <code>FORCE_IDENTIFIERS_TO_LOWERCASE</code>
         <attribute>N</attribute>
@@ -457,10 +465,18 @@
         <code>FORCE_IDENTIFIERS_TO_UPPERCASE</code>
         <attribute>N</attribute>
       </attribute>
+      <attribute>
+        <code>INITIAL_POOL_SIZE</code>
+        <attribute>100</attribute>
+      </attribute>
       <attribute>
         <code>IS_CLUSTERED</code>
         <attribute>N</attribute>
       </attribute>
+      <attribute>
+        <code>MAXIMUM_POOL_SIZE</code>
+        <attribute>300</attribute>
+      </attribute>
       <attribute>
         <code>MSSQLUseIntegratedSecurity</code>
         <attribute>false</attribute>
@@ -491,7 +507,7 @@
       </attribute>
       <attribute>
         <code>USE_POOLING</code>
-        <attribute>N</attribute>
+        <attribute>Y</attribute>
       </attribute>
     </attributes>
   </connection>
@@ -499,7 +515,7 @@
     <hop>
       <from>表输入 2</from>
       <to>Elasticsearch bulk insert 2</to>
-      <enabled>N</enabled>
+      <enabled>Y</enabled>
     </hop>
   </order>
   <step>
@@ -508,15 +524,15 @@
     <description/>
     <distribute>Y</distribute>
     <custom_distribution/>
-    <copies>20</copies>
+    <copies>1</copies>
     <partitioning>
       <method>none</method>
       <schema_name/>
     </partitioning>
     <general>
-      <index>crm_order</index>
-      <type>_doc?routing=%{MemKey}</type>
-      <batchSize>3</batchSize>
+      <index>crm_order_routingphone</index>
+      <type>_doc?routing=%{MemPhone}</type>
+      <batchSize>100</batchSize>
       <timeout>100</timeout>
       <timeoutUnit>SECONDS</timeoutUnit>
       <isJson>N</isJson>
@@ -1117,9 +1133,8 @@ SELECT
 , ConfirmId	
 FROM Orders a
 where 1=1 and (AconsigneePhone1!='' or AconsigneePhone2!='')
-	  and CreationDate >  DATEADD(DAY,-2,GETDATE())  
---order by AConsigneePhone2 asc 
- --offset 0 rows fetch NEXT  2 rows only
+order by AConsigneePhone2 asc 
+ --offset 4600000 rows --fetch NEXT  2 rows only
  
 --)a
 --left join MemberData..DataImport as b on a.MemKey = b.Id
@@ -1134,7 +1149,7 @@ where 1=1 and (AconsigneePhone1!='' or AconsigneePhone2!='')
 --1=1  --and b.DataId is not null 
 --and MemKey >((${page}-1)*${pagesize})  and MemKey &lt;= ((${page}-1)*${pagesize} + (${pagesize})) 
 --and MemKey >0  and MemKey &lt;= 10
-order by AConsigneePhone2 asc 
+--order by AConsigneePhone2 asc 
 --offset ((${page}-1)*${pagesize}) rows fetch next (${pagesize}) rows only
 --offset ((389-1)*5000) rows --fetch next (5000) rows only
 </sql>