@article{Chen_Zhang_Zhao_2020,
  author   = {Chen, Wei and Zhang, Weizhong and Zhao, Haoyu},
  title    = {Gradient Method for Continuous Influence Maximization with Budget-Saving Considerations},
  journal  = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume   = {34},
  number   = {01},
  pages    = {43--50},
  year     = {2020},
  month    = apr,
  doi      = {10.1609/aaai.v34i01.5332},
  url      = {https://ojs.aaai.org/index.php/AAAI/article/view/5332},
  abstract = {Continuous influence maximization (CIM) generalizes the original
    influence maximization by incorporating general marketing strategies: a
    marketing strategy mix is a vector $\mathbf{x} = (x_1, \ldots, x_d)$ such
    that for each node $v$ in a social network, $v$ could be activated as a
    seed of diffusion with probability $h_v(\mathbf{x})$, where $h_v$ is a
    strategy activation function satisfying DR-submodularity. CIM is the task
    of selecting a strategy mix $\mathbf{x}$ with constraint
    $\sum_i x_i \le k$ where $k$ is a budget constraint, such that the total
    number of activated nodes after the diffusion process, called influence
    spread and denoted as $g(\mathbf{x})$, is maximized. In this paper, we
    extend CIM to consider budget saving, that is, each strategy mix
    $\mathbf{x}$ has a cost $c(\mathbf{x})$ where $c$ is a convex cost
    function, and we want to maximize the balanced sum
    $g(\mathbf{x}) + \lambda(k - c(\mathbf{x}))$ where $\lambda$ is a balance
    parameter, subject to the constraint of $c(\mathbf{x}) \le k$. We denote
    this problem as CIM-BS. The objective function of CIM-BS is neither
    monotone, nor DR-submodular or concave, and thus neither the greedy
    algorithm nor the standard result on gradient method could be directly
    applied. Our key innovation is the combination of the gradient method
    with reverse influence sampling to design algorithms that solve CIM-BS:
    For the general case, we give an algorithm that achieves
    $(1/2 - \epsilon)$-approximation, and for the case of independent
    strategy activations, we present an algorithm that achieves
    $(1 - 1/e - \epsilon)$-approximation.},
}