BOSS is short for Business Operation Support System. It covers customer management (CBOSS), product management (PBOSS), resource management, customer service, channel management, billing, accounting, settlement, partner management, and other functions. It plans and integrates these business functions in a centralized, unified way, forming an integrated support system in which information resources are fully shared.
In day-to-day business we often come across the term NGBOSS, short for Next Generation Business Operation Support System, where NG stands for "next generation". NGBOSS differs somewhat from BOSS, and the two were built with completely different approaches: BOSS started from province-level systems that were then gradually extended and patched, so although BOSS itself is large, its surrounding and bolt-on systems are also sizable, and it only became an integrated system step by step. NGBOSS, by contrast, is a top-down rebuild of the business support network that covers not only BOSS but also CRM and customer service, network management, and even DSMP.
VGOP (Value-added Service General Operation Platform) is China Mobile's integrated operation platform for value-added services. It drives the construction and operation of data services from isolation toward integration along four dimensions: processes, functions, data, and interfaces, standardizing and leveling the business systems, strengthening the customer-centric competitiveness of data services, and accumulating the "soft power" of integrated service operation. In its initial phase it delivers three core capabilities: interworking and scheduling of service capabilities; diagnosis and analysis of customer behavior; and continuous monitoring and optimization of service quality.
import torch
import torch.nn as nn
from torch.autograd import Variable  # assumed module-level imports for these class fragments

# Record the indices of the Keras layers that actually carry weights.
for i, layer in enumerate(self.kModel.layers):
    if len(layer.weights) > 0:
        self.__target_layers.append(i)

def __retrieve_p_layers(self, input_size):
    # Push a dummy input through the PyTorch model and use forward hooks to
    # collect every module that owns weights, in execution order.
    input = torch.randn(input_size)
    input = Variable(input.unsqueeze(0))
    hooks = []

    def add_hooks(module):
        def hook(module, input, output):
            if hasattr(module, "weight"):
                self.__source_layers.append(module)
        if not isinstance(module, nn.ModuleList) and not isinstance(module, nn.Sequential) and module != self.pModel:
            hooks.append(module.register_forward_hook(hook))

    self.pModel.apply(add_hooks)
    self.pModel(input)
    for hook in hooks:
        hook.remove()
/**
 * Utility class for manipulating images.
 **/
public class ImageUtils {
    /**
     * Returns a transformation matrix from one reference frame into another.
     * Handles cropping (if maintaining aspect ratio is desired) and rotation.
     *
     * @param srcWidth Width of source frame.
     * @param srcHeight Height of source frame.
     * @param dstWidth Width of destination frame.
     * @param dstHeight Height of destination frame.
     * @param applyRotation Amount of rotation to apply from one frame to another.
     *                      Must be a multiple of 90.
     * @param maintainAspectRatio If true, will ensure that scaling in x and y remains constant,
     *                            cropping the image if necessary.
     * @return The transformation fulfilling the desired requirements.
     */
    public static Matrix getTransformationMatrix(
            final int srcWidth,
            final int srcHeight,
            final int dstWidth,
            final int dstHeight,
            final int applyRotation,
            final boolean maintainAspectRatio) {
        final Matrix matrix = new Matrix();

        if (applyRotation != 0) {
            // Translate so center of image is at origin.
            matrix.postTranslate(-srcWidth / 2.0f, -srcHeight / 2.0f);

            // Rotate around origin.
            matrix.postRotate(applyRotation);
        }

        // Account for the already applied rotation, if any, and then determine how
        // much scaling is needed for each axis.
        final boolean transpose = (Math.abs(applyRotation) + 90) % 180 == 0;

        final int inWidth = transpose ? srcHeight : srcWidth;
        final int inHeight = transpose ? srcWidth : srcHeight;

        // Apply scaling if necessary.
        if (inWidth != dstWidth || inHeight != dstHeight) {
            final float scaleFactorX = dstWidth / (float) inWidth;
            final float scaleFactorY = dstHeight / (float) inHeight;

            if (maintainAspectRatio) {
                // Scale by minimum factor so that dst is filled completely while
                // maintaining the aspect ratio. Some image may fall off the edge.
                final float scaleFactor = Math.max(scaleFactorX, scaleFactorY);
                matrix.postScale(scaleFactor, scaleFactor);
            } else {
                // Scale exactly to fill dst from src.
                matrix.postScale(scaleFactorX, scaleFactorY);
            }
        }

        if (applyRotation != 0) {
            // Translate back from origin centered reference to destination frame.
            matrix.postTranslate(dstWidth / 2.0f, dstHeight / 2.0f);
        }

        return matrix;
    }

    public static Bitmap processBitmap(Bitmap source, int size) {
        int image_height = source.getHeight();
        int image_width = source.getWidth();
    // Load the TensorFlow inference library.
    static {
        System.loadLibrary("tensorflow_inference");
    }

    // Path to our model file and names of the input and output nodes.
    private String MODEL_PATH = "file:///android_asset/squeezenet.pb";
    private String INPUT_NAME = "input_1";
    private String OUTPUT_NAME = "output_1";
    private TensorFlowInferenceInterface tf;

    // Array to hold the predictions and float values to hold the image data.
    float[] PREDICTIONS = new float[1000];
    private float[] floatValues;
    private int[] INPUT_SIZE = {224, 224, 3};
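These fields come together in the actual inference call. The following is a minimal sketch of how they are typically wired up with the standard TensorFlowInferenceInterface feed/run/fetch API; the predict() method name and the use of getAssets() from a surrounding Activity are illustrative assumptions, not code from the original article.

    // Hypothetical sketch: run the model on a preprocessed image using the fields above.
    public float[] predict(float[] imageData) {
        if (tf == null) {
            // Create the inference interface from the bundled model file.
            tf = new TensorFlowInferenceInterface(getAssets(), MODEL_PATH);
        }
        // Feed the image data (1 x 224 x 224 x 3 floats) into the input node.
        tf.feed(INPUT_NAME, imageData, 1, INPUT_SIZE[0], INPUT_SIZE[1], INPUT_SIZE[2]);
        // Run the graph up to the output node.
        tf.run(new String[]{OUTPUT_NAME});
        // Copy the 1000-class scores into the PREDICTIONS array.
        tf.fetch(OUTPUT_NAME, PREDICTIONS);
        return PREDICTIONS;
    }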
set colsep ','      -- column (field) output separator
set echo off        -- do not echo each SQL command from scripts run with start; default is on
set feedback off    -- do not report how many records each SQL command processed; default is on
set heading off     -- do not print column headings; default is on
set pagesize 0      -- number of lines per page; default is 24, set to 0 to avoid page breaks
set termout off     -- do not display the results of commands executed from a script; default is on
set trimout on      -- strip trailing spaces from each line of standard output; default is off
set trimspool on    -- strip trailing spaces from each line of spooled (redirected) output; default is off
set term off        -- do not display output on the screen
set linesize 10000  -- line width; set as needed, default is 100
set wrap off        -- do not wrap long lines; anything beyond LINESIZE is truncated
【2】Exporting to a txt file with a spool script in PL/SQL
The spool.sql script is as follows:
SPOOL D:\测试.txt
set echo off      -- do not display the SQL statements being executed from the script
set feedback off  -- do not display the number of rows queried or modified
set term off      -- do not display output on the screen
set heading off   -- do not display column headings
set linesize 1000 -- line width; set as needed, default is 100
select AAB301||','||AAE002|| ',' ||AAC001|| ',' ||AAE252|| ',' ||AAE091|| ',' ||AAE020|| ',' ||AAE022 FROM JGCA; -- the query for the data to export
SPOOL OFF
Real Time Messaging Protocol (RTMP) is a live-video streaming protocol originally developed by Macromedia and now owned by Adobe. Like HLS, it can be used for live video; the difference is that RTMP is Flash-based and therefore cannot be played in the browser on iOS, but its latency is lower than HLS. For that reason RTMP is typically used on the upload side, i.e. for pushing the video stream to the server.
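As a concrete illustration of the push side, a stream is commonly published to an RTMP ingest point with ffmpeg; this is only a hedged example, and the input file, server address, and stream key are placeholders rather than anything from the original text:

ffmpeg -re -i input.mp4 -c copy -f flv rtmp://example.com/live/streamkey

Here -re reads the input at its native frame rate, -c copy forwards the existing audio/video streams without re-encoding, and -f flv selects the FLV container that RTMP expects.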
#The new consumer no longer depends on ZooKeeper; bootstrap.servers tells the consumer where the Kafka brokers are
bootstrap.servers=ip-188-33-33-31.eu-central-1.compute.internal:9092,ip-188-33-33-32.eu-central-1.compute.internal:9092,ip-188-33-33-33.eu-central-1.compute.internal:9092
#If you use the old consumer, use zookeeper.connect instead
#zookeeper.connect=ip-188-33-33-31.eu-central-1.compute.internal:2181,ip-188-33-33-32.eu-central-1.compute.internal:2181,ip-188-33-33-33.eu-central-1.compute.internal:2181
#change the default 40000 to 50000
request.timeout.ms=50000
#change default heartbeat interval from 3000 to 30000
heartbeat.interval.ms=30000
#change default session timeout from 30000 to 40000
session.timeout.ms=40000
#consumer group id
group.id=africaBetMirrorGroupTest
partition.assignment.strategy=org.apache.kafka.clients.consumer.RoundRobinAssignor
#restrict max poll records from the default 2147483647 to 20000
max.poll.records=20000
#set receive buffer from the default 64 KB to 512 KB
receive.buffer.bytes=524288
#set max amount of data per partition to override default 1048576
max.partition.fetch.bytes=5248576
#consumer timeout
#consumer.timeout.ms=5000
The mirror-producer.properties configuration file is as follows:
bootstrap.servers=10.120.241.146:9092,10.120.241.82:9092,10.120.241.110:9092
# name of the partitioner class for partitioning events; default partition spreads data randomly
#partitioner.class=
# specifies whether the messages are sent asynchronously (async) or synchronously (sync)
producer.type=sync
# specify the compression codec for all data generated: none, gzip, snappy, lz4.
# the old config values work as well: 0, 1, 2, 3 for none, gzip, snappy, lz4, respectively
compression.codec=none
# message encoder
serializer.class=kafka.serializer.DefaultEncoder
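With both files in place, they are typically handed to Kafka's MirrorMaker tool, which consumes from the source cluster using the consumer config and republishes to the target cluster using the producer config. The invocation below is a hedged sketch: the consumer file name mirror-consumer.properties and the whitelist pattern are assumptions, since only mirror-producer.properties is named above.

bin/kafka-mirror-maker.sh --consumer.config mirror-consumer.properties --producer.config mirror-producer.properties --whitelist=".*"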
import com.ckm.kafka.producer.impl.KafkaProducerToolImpl;
import com.ckm.kafka.producer.inter.KafkaProducerTool;
/**
* Created by ckm on 2016/8/30.
*/
public class SimpleProducer {
    public static void main(String[] args) {
        KafkaProducerTool kafkaProducerTool = new KafkaProducerToolImpl();
        int i = 0;
        String message = "";
        while (true) {
            // Publish an endless stream of numbered test messages to the monitored topic.
            message = "test-simple-producer : " + i++;
            kafkaProducerTool.publishMessage("kafkamonitor-simpleproducer", message);
        }
    }
}
(1) In the Topic List tab we can see the newly created kafkamonitor-simpleproducer topic.
(2) Clicking into it, we can see a console-consumer currently consuming this topic.
(3) Drilling further into that consumer shows its current consumption status. In the monitoring chart, the top-left corner shows the topic's current produce rate and the top-right corner shows the consumer's consume rate. The chart also contains lines in three colors: blue for the number of messages currently in the topic, grey for the offset the consumer has reached, and red for the difference between the two, i.e. how many messages the consumer lags behind the producer.
(4) A look at message consumption per partition shows that there are currently 3 partitions and that the messages are distributed very unevenly across them. Keep this in mind for comparison with the custom Producer below.
import kafka.producer.Partitioner;
/**
* Created by ckm on 2016/8/30.
*/
public class TestPartitioner implements Partitioner {
    public TestPartitioner() {
    }

    @Override
    public int partition(Object key, int numPartitions) {
        // The producer below passes the key as a String (i + ""), so parse it
        // rather than casting it directly to int, then spread messages across
        // partitions by key modulo the partition count.
        int intKey = Integer.parseInt(key.toString());
        return intKey % numPartitions;
    }
}
3. Producer code
Set the custom Partitioner on the Producer; the rest of the call flow is the same as in section 2.
import com.ckm.kafka.producer.impl.KafkaProducerToolImpl;
import com.ckm.kafka.producer.inter.KafkaProducerTool;
/**
* Created by ckm on 2016/8/30.
*/
public class PartitionedProducer {
    public static void main(String[] args) {
        KafkaProducerTool kafkaProducerTool = new KafkaProducerToolImpl();
        // Tell the producer to use the custom partitioner defined above.
        kafkaProducerTool.getProducerProperties().put("partitioner.class", "TestPartitioner");
        int i = 0;
        String message = "";
        while (true) {
            message = "test-partitioner-producer : " + i;
            System.out.println(message);
            // The key (i as a String) determines the target partition via TestPartitioner.
            kafkaProducerTool.publishPartitionedMessage("kafkamonitor-partitionedproducer", i + "", message);
            i++;
        }
    }
}
The other pages are similar to those above; here we only compare the per-partition message counts with those from section 2. This time the messages in each partition are distributed evenly.
Caveats: there is a pitfall here. By default, when a Producer sends a message to a Topic that does not exist, Kafka creates that Topic automatically. Because this wrapper takes both the message and the topic in the same call, passing the two arguments in the wrong order will silently create a new Topic in the Kafka cluster. Normally, Topics should be created explicitly in advance and Producers should only then write to them, and it is best to disable Kafka's automatic Topic creation altogether. Also, if an unwanted Topic does get created, deleting it only produces a "marked for deletion" message: the Topic is merely flagged for deletion and still shows up in the list command. Both behaviors are controlled by the following two parameters in server.properties (an example snippet follows the table):
Parameter                  Default  Effect
auto.create.topics.enable  true     Enable auto creation of topics on the server
delete.topic.enable        false    Enables topic deletion; deleting a topic through the admin tool has no effect if this config is turned off
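For example, a minimal sketch of the corresponding server.properties entries, using the values recommended above rather than Kafka's defaults:

# Do not create topics implicitly when a producer writes to an unknown topic.
auto.create.topics.enable=false
# Allow topics to actually be deleted via the admin tool instead of only being marked.
delete.topic.enable=true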
import string

# The LSA class (parseDoc, buildMwd, printMwd) is assumed to be defined elsewhere in the article.
if __name__ == "__main__":
    docs = [
        "The Neatest Little Guide to Stock Market Investing",
        "Investing For Dummies, 4th Edition",
        "The Little Book of Common Sense Investing: The Only Way to Guarantee Your Fair Share of Stock Market Returns",
        "The Little Book of Value Investing",
        "Value Investing: From Graham to Buffett and Beyond",
        "Rich Dad's Guide to Investing: What the Rich Invest in, That the Poor and the Middle Class Do Not!",
        "Investing in Real Estate, 5th Edition",
        "Stock Investing For Dummies",
        "Rich Dad's Advisors: The ABC's of Real Estate Investing: The Secrets of Finding Hidden Profits Most Investors Miss"
    ]
    stopwords = ['and', 'edition', 'for', 'in', 'little', 'of', 'the', 'to']
    ignorechars = string.punctuation
    lsaDemo = LSA(stopwords, ignorechars)
    for d in docs:
        lsaDemo.parseDoc(d)
    lsaDemo.buildMwd()
    lsaDemo.printMwd()
import numpy as np

# Convert the binarized array into a grid-feature map: split it into 4x4 cells
# and count the number of 1s in each cell.
def get_features(array):
    # Height and width of the array.
    h, w = array.shape
    data = []
    for x in range(0, w // 4):
        offset_y = x * 4
        temp = []
        for y in range(0, h // 4):
            offset_x = y * 4
            # Count the 1s in this 4x4 region.
            temp.append(sum(sum(array[0 + offset_y:4 + offset_y, 0 + offset_x:4 + offset_x])))
        data.append(temp)
    return np.asarray(data)
import org.apache.spark.ml.feature.{HashingTF, IDF, Tokenizer}
val sentenceData = spark.createDataFrame(Seq(
(0, "Hi I heard about Spark"),
(0, "I wish Java could use case classes"),
(1, "Logistic regression models are neat")
)).toDF("label", "sentence")
val tokenizer = new Tokenizer().setInputCol("sentence").setOutputCol("words")
val wordsData = tokenizer.transform(sentenceData)
val hashingTF = new HashingTF()
.setInputCol("words").setOutputCol("rawFeatures").setNumFeatures(20)
val featurizedData = hashingTF.transform(wordsData)
// alternatively, CountVectorizer can also be used to get term frequency vectors
val idf = new IDF().setInputCol("rawFeatures").setOutputCol("features")
val idfModel = idf.fit(featurizedData)
val rescaledData = idfModel.transform(featurizedData)
rescaledData.select("features", "label").take(3).foreach(println)
import org.apache.spark.ml.feature.Word2Vec
// Input data: Each row is a bag of words from a sentence or document.
val documentDF = spark.createDataFrame(Seq(
"Hi I heard about Spark".split(" "),"I wish Java could use case classes".split(" "),"Logistic regression models are neat".split(" ")
).map(Tuple1.apply)).toDF("text")
// Learn a mapping from words to Vectors.
val word2Vec = new Word2Vec()
.setInputCol("text")
.setOutputCol("result")
.setVectorSize(3)
.setMinCount(0)
val model = word2Vec.fit(documentDF)
val result = model.transform(documentDF)
result.select("result").take(3).foreach(println)
import org.apache.spark.ml.feature.{CountVectorizer, CountVectorizerModel}
val df = spark.createDataFrame(Seq(
(0, Array("a", "b", "c")),
(1, Array("a", "b", "b", "c", "a"))
)).toDF("id", "words")
// fit a CountVectorizerModel from the corpus
val cvModel: CountVectorizerModel = new CountVectorizer()
.setInputCol("words")
.setOutputCol("features")
.setVocabSize(3)
.setMinDF(2)
.fit(df)
// alternatively, define CountVectorizerModel with a-priori vocabulary
val cvm = new CountVectorizerModel(Array("a", "b", "c"))
.setInputCol("words")
.setOutputCol("features")
cvModel.transform(df).select("features").show()
import pandas as pd

ma_list = [5, 20, 60]
# Simple moving averages of the close; pd.rolling_mean was removed in modern
# pandas, so use Series.rolling(...).mean() instead.
for ma in ma_list:
    data['MA' + str(ma)] = data.close.rolling(ma).mean()
# Exponentially weighted moving averages; pd.ewma is now Series.ewm(span=...).mean().
for ma in ma_list:
    data['EMA' + str(ma)] = data.close.ewm(span=ma).mean()
data.to_csv("EWMA.csv")